Dataset schema (from the viewer header):
repo_id: string (15 to 86 chars)
file_path: string (27 to 180 chars)
content: string (1 to 1.75M chars)
__index_level_0__: int64 (0 to 0)
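Each row below pairs a repo_id and a file_path with the raw file content. As a minimal sketch of reading rows with this schema, assuming the dump is available as a local Parquet file (the file name here is hypothetical; only the column names and types come from the header above):

```python
from datasets import load_dataset

# Hypothetical local file; only the column names/types are taken from the
# schema above.
ds = load_dataset("parquet", data_files="hf_public_repos.parquet", split="train")

row = ds[0]
print(row["repo_id"])            # e.g. "hf_public_repos"
print(row["file_path"])          # path of the file within the dump
print(len(row["content"]))       # raw file text, from 1 char up to ~1.75M
print(row["__index_level_0__"])  # pandas index column carried into the dataset
```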
repo_id: hf_public_repos
file_path: hf_public_repos/text-generation-inference/.dockerignore
content:
aml
target
server/transformers
server/flash-attention
__index_level_0__: 0
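Because file_path already embeds the repository layout, a row can be materialized back onto disk by writing content to file_path. A minimal sketch; the write_row helper and the checkout directory are hypothetical, and the row values are the ones from the record above:

```python
from pathlib import Path

def write_row(row: dict, root: Path) -> None:
    """Write one dataset row back to disk; file_path already includes repo_id."""
    dest = root / row["file_path"]
    dest.parent.mkdir(parents=True, exist_ok=True)
    dest.write_text(row["content"])

write_row(
    {
        "repo_id": "hf_public_repos",
        "file_path": "hf_public_repos/text-generation-inference/.dockerignore",
        "content": "aml\ntarget\nserver/transformers\nserver/flash-attention\n",
        "__index_level_0__": 0,
    },
    Path("checkout"),  # hypothetical output directory
)
```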
repo_id: hf_public_repos
file_path: hf_public_repos/text-generation-inference/Cargo.lock
content:
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "addr2line"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3"
dependencies = [
 "gimli",
]

[[package]]
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

[[package]]
name = "aes"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2"
dependencies = [
 "cfg-if",
 "cipher",
 "cpufeatures",
]

[[package]]
name = "ahash"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
dependencies = [
 "cfg-if",
 "once_cell",
 "version_check",
]

[[package]]
name = "aho-corasick"
version = "0.7.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
dependencies = [
 "memchr",
]

[[package]]
name = "aho-corasick"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
dependencies = [
 "memchr",
]

[[package]]
name = "anstream"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
dependencies = [
 "anstyle",
 "anstyle-parse",
 "anstyle-query",
 "anstyle-wincon",
 "colorchoice",
 "is-terminal",
 "utf8parse",
]

[[package]]
name = "anstyle"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd"

[[package]]
name = "anstyle-parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333"
dependencies = [
 "utf8parse",
]

[[package]]
name = "anstyle-query"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
dependencies = [
 "windows-sys 0.48.0",
]

[[package]]
name = "anstyle-wincon"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188"
dependencies = [
 "anstyle",
 "windows-sys 0.48.0",
]

[[package]]
name = "anyhow"
version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"

[[package]]
name = "arc-swap"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6"

[[package]]
name = "async-rustls"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93b21a03b7c21702a0110f9f8d228763a533570deb376119042dabf33c37a01a"
dependencies = [
 "futures-io",
 "rustls",
 "webpki",
]

[[package]]
name = "async-stream"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51"
dependencies = [
 "async-stream-impl",
 "futures-core",
 "pin-project-lite",
]

[[package]]
name = "async-stream-impl"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.25",
]

[[package]]
name = "async-trait"
version = "0.1.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.25",
]

[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"

[[package]]
name = "average"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "843ec791d3f24503bbf72bbd5e49a3ab4dbb4bcd0a8ef6b0c908efa73caa27b1"
dependencies = [
 "easy-cast",
 "float-ord",
 "num-traits",
]

[[package]]
name = "awaitdrop"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "771051cdc7eec2dc1b23fbf870bb7fbb89136fe374227c875e377f1eed99a429"
dependencies = [
 "futures",
 "generational-arena",
 "parking_lot",
 "slotmap",
]

[[package]]
name = "axum"
version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39"
dependencies = [
 "async-trait",
 "axum-core",
 "bitflags 1.3.2",
 "bytes",
 "futures-util",
 "http",
 "http-body",
 "hyper",
 "itoa",
 "matchit",
 "memchr",
 "mime",
 "percent-encoding",
 "pin-project-lite",
 "rustversion",
 "serde",
 "serde_json",
 "serde_path_to_error",
 "serde_urlencoded",
 "sync_wrapper",
 "tokio",
 "tower",
 "tower-layer",
 "tower-service",
]

[[package]]
name = "axum-core"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
dependencies = [
 "async-trait",
 "bytes",
 "futures-util",
 "http",
 "http-body",
 "mime",
 "rustversion",
 "tower-layer",
 "tower-service",
]

[[package]]
name = "axum-tracing-opentelemetry"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "164b95427e83b79583c7699a72b4a6b485a12bbdef5b5c054ee5ff2296d82f52"
dependencies = [
 "axum",
 "futures",
 "http",
 "opentelemetry 0.18.0",
 "tower",
 "tower-http 0.3.5",
 "tracing",
 "tracing-opentelemetry 0.18.0",
]

[[package]]
name = "backtrace"
version = "0.3.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12"
dependencies = [
 "addr2line",
 "cc",
 "cfg-if",
 "libc",
 "miniz_oxide",
 "object",
 "rustc-demangle",
]

[[package]]
name = "base64"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"

[[package]]
name = "base64"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d"

[[package]]
name = "base64ct"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"

[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

[[package]]
name = "bitflags"
version = "2.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42"

[[package]]
name = "block-buffer"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
dependencies = [
 "generic-array",
]

[[package]]
name = "bumpalo"
version = "3.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"

[[package]]
name = "bytecount"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c"

[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"

[[package]]
name = "bytes"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be"

[[package]]
name = "bzip2"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8"
dependencies = [
 "bzip2-sys",
 "libc",
]

[[package]]
name = "bzip2-sys"
version = "0.1.11+1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc"
dependencies = [
 "cc",
 "libc",
 "pkg-config",
]

[[package]]
name = "cached-path"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "097968e38f1319207f057d0f4d76452e4f4f847a5de61c5215379f297fa034f3"
dependencies = [
 "flate2",
 "fs2",
 "glob",
 "indicatif 0.16.2",
 "log",
 "rand",
 "reqwest",
 "serde",
 "serde_json",
 "sha2",
 "tar",
 "tempfile",
 "thiserror",
 "zip",
]

[[package]]
name = "cassowary"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53"

[[package]]
name = "cc"
version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
dependencies = [
 "jobserver",
]

[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "cipher"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
dependencies = [
 "crypto-common",
 "inout",
]

[[package]]
name = "clap"
version = "4.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1640e5cc7fb47dbb8338fd471b105e7ed6c3cb2aeb00c2e067127ffd3764a05d"
dependencies = [
 "clap_builder",
 "clap_derive",
 "once_cell",
]

[[package]]
name = "clap_builder"
version = "4.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "98c59138d527eeaf9b53f35a77fcc1fad9d883116070c63d5de1c7dc7b00c72b"
dependencies = [
 "anstream",
 "anstyle",
 "clap_lex",
 "strsim",
]

[[package]]
name = "clap_derive"
version = "4.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f"
dependencies = [
 "heck",
 "proc-macro2",
 "quote",
 "syn 2.0.25",
]

[[package]]
name = "clap_lex"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"

[[package]]
name = "colorchoice"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"

[[package]]
name = "console"
version = "0.15.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8"
dependencies = [
 "encode_unicode",
 "lazy_static",
 "libc",
 "unicode-width",
 "windows-sys 0.45.0",
]

[[package]]
name = "constant_time_eq"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"

[[package]]
name = "core-foundation"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146"
dependencies = [
 "core-foundation-sys",
 "libc",
]

[[package]]
name = "core-foundation-sys"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"

[[package]]
name = "cpufeatures"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1"
dependencies = [
 "libc",
]

[[package]]
name = "crc32fast"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
dependencies = [
 "cfg-if",
]

[[package]]
name = "crossbeam-channel"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
dependencies = [
 "cfg-if",
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-deque"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
dependencies = [
 "cfg-if",
 "crossbeam-epoch",
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-epoch"
version = "0.9.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
dependencies = [
 "autocfg",
 "cfg-if",
 "crossbeam-utils",
 "memoffset 0.9.0",
 "scopeguard",
]

[[package]]
name = "crossbeam-utils"
version = "0.8.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
dependencies = [
 "cfg-if",
]

[[package]]
name = "crossterm"
version = "0.26.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a84cda67535339806297f1b331d6dd6320470d2a0fe65381e79ee9e156dd3d13"
dependencies = [
 "bitflags 1.3.2",
 "crossterm_winapi",
 "libc",
 "mio",
 "parking_lot",
 "signal-hook",
 "signal-hook-mio",
 "winapi",
]

[[package]]
name = "crossterm_winapi"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b"
dependencies = [
 "winapi",
]

[[package]]
name = "crypto-common"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
 "generic-array",
 "typenum",
]

[[package]]
name = "ctrlc"
version = "3.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e"
dependencies = [
 "nix",
 "windows-sys 0.48.0",
]

[[package]]
name = "darling"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
dependencies = [
 "darling_core",
 "darling_macro",
]

[[package]]
name = "darling_core"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
dependencies = [
 "fnv",
 "ident_case",
 "proc-macro2",
 "quote",
 "strsim",
 "syn 1.0.109",
]

[[package]]
name = "darling_macro"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
dependencies = [
 "darling_core",
 "quote",
 "syn 1.0.109",
]

[[package]]
name = "dashmap"
version = "5.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d"
dependencies = [
 "cfg-if",
 "hashbrown 0.14.0",
 "lock_api",
 "once_cell",
 "parking_lot_core",
]

[[package]]
name = "derive_builder"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8"
dependencies = [
 "derive_builder_macro",
]

[[package]]
name = "derive_builder_core"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f"
dependencies = [
 "darling",
 "proc-macro2",
 "quote",
 "syn 1.0.109",
]

[[package]]
name = "derive_builder_macro"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e"
dependencies = [
 "derive_builder_core",
 "syn 1.0.109",
]

[[package]]
name = "digest"
version = "0.10.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
 "block-buffer",
 "crypto-common",
 "subtle",
]

[[package]]
name = "dirs"
version = "4.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059"
dependencies = [
 "dirs-sys",
]

[[package]]
name = "dirs-sys"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6"
dependencies = [
 "libc",
 "redox_users",
 "winapi",
]

[[package]]
name = "easy-cast"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4bd102ee8c418348759919b83b81cdbdc933ffe29740b903df448b4bafaa348e"
dependencies = [
 "libm",
]

[[package]]
name = "either"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"

[[package]]
name = "encode_unicode"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"

[[package]]
name = "encoding_rs"
version = "0.8.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394"
dependencies = [
 "cfg-if",
]

[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"

[[package]]
name = "errno"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a"
dependencies = [
 "errno-dragonfly",
 "libc",
 "windows-sys 0.48.0",
]

[[package]]
name = "errno-dragonfly"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
dependencies = [
 "cc",
 "libc",
]

[[package]]
name = "esaxx-rs"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f748b253ceca9fed5f42f8b5ceb3851e93102199bc25b64b65369f76e5c0a35"
dependencies = [
 "cc",
]

[[package]]
name = "fastrand"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be"
dependencies = [
 "instant",
]

[[package]]
name = "filetime"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5cbc844cecaee9d4443931972e1289c8ff485cb4cc2767cb03ca139ed6885153"
dependencies = [
 "cfg-if",
 "libc",
 "redox_syscall 0.2.16",
 "windows-sys 0.48.0",
]

[[package]]
name = "fixedbitset"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"

[[package]]
name = "flate2"
version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743"
dependencies = [
 "crc32fast",
 "miniz_oxide",
]

[[package]]
name = "float-ord"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ce81f49ae8a0482e4c55ea62ebbd7e5a686af544c00b9d090bba3ff9be97b3d"

[[package]]
name = "float_eq"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28a80e3145d8ad11ba0995949bbcf48b9df2be62772b3d351ef017dff6ecb853"

[[package]]
name = "flume"
version = "0.10.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577"
dependencies = [
 "futures-core",
 "futures-sink",
 "nanorand",
 "pin-project",
 "spin 0.9.8",
]

[[package]]
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"

[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
 "foreign-types-shared",
]

[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"

[[package]]
name = "form_urlencoded"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
dependencies = [
 "percent-encoding",
]

[[package]]
name = "fs2"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"
dependencies = [
 "libc",
 "winapi",
]

[[package]]
name = "futures"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40"
dependencies = [
 "futures-channel",
 "futures-core",
 "futures-executor",
 "futures-io",
 "futures-sink",
 "futures-task",
 "futures-util",
]

[[package]]
name = "futures-channel"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2"
dependencies = [
 "futures-core",
 "futures-sink",
]

[[package]]
name = "futures-core"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c"

[[package]]
name = "futures-executor"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0"
dependencies = [
 "futures-core",
 "futures-task",
 "futures-util",
]

[[package]]
name = "futures-io"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964"

[[package]]
name = "futures-macro"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.25",
]

[[package]]
name = "futures-sink"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e"

[[package]]
name = "futures-task"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65"

[[package]]
name = "futures-util"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533"
dependencies = [
 "futures-channel",
 "futures-core",
 "futures-io",
 "futures-macro",
 "futures-sink",
 "futures-task",
 "memchr",
 "pin-project-lite",
 "pin-utils",
 "slab",
]

[[package]]
name = "generational-arena"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877e94aff08e743b651baaea359664321055749b398adff8740a7399af7796e7"
dependencies = [
 "cfg-if",
]

[[package]]
name = "generic-array"
version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
dependencies = [
 "typenum",
 "version_check",
]

[[package]]
name = "getrandom"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
dependencies = [
 "cfg-if",
 "js-sys",
 "libc",
 "wasi",
 "wasm-bindgen",
]

[[package]]
name = "gimli"
version = "0.27.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e"

[[package]]
name = "glob"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"

[[package]]
name = "grpc-metadata"
version = "0.1.0"
dependencies = [
 "opentelemetry 0.19.0",
 "tonic 0.9.2",
 "tracing",
 "tracing-opentelemetry 0.19.0",
]

[[package]]
name = "h2"
version = "0.3.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049"
dependencies = [
 "bytes",
 "fnv",
 "futures-core",
 "futures-sink",
 "futures-util",
 "http",
 "indexmap 1.9.3",
 "slab",
 "tokio",
 "tokio-util",
 "tracing",
]

[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"

[[package]]
name = "hashbrown"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038"
dependencies = [
 "ahash",
]

[[package]]
name = "hashbrown"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"

[[package]]
name = "heck"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"

[[package]]
name = "hermit-abi"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"

[[package]]
name = "hmac"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
dependencies = [
 "digest",
]

[[package]]
name = "hostname"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
dependencies = [
 "libc",
 "match_cfg",
 "winapi",
]

[[package]]
name = "http"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482"
dependencies = [
 "bytes",
 "fnv",
 "itoa",
]

[[package]]
name = "http-body"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
dependencies = [
 "bytes",
 "http",
 "pin-project-lite",
]

[[package]]
name = "http-range-header"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29"

[[package]]
name = "httparse"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"

[[package]]
name = "httpdate"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"

[[package]]
name = "hyper"
version = "0.14.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468"
dependencies = [
 "bytes",
 "futures-channel",
 "futures-core",
 "futures-util",
 "h2",
 "http",
 "http-body",
 "httparse",
 "httpdate",
 "itoa",
 "pin-project-lite",
 "socket2",
 "tokio",
 "tower-service",
 "tracing",
 "want",
]

[[package]]
name = "hyper-timeout"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
dependencies = [
 "hyper",
 "pin-project-lite",
 "tokio",
 "tokio-io-timeout",
]

[[package]]
name = "hyper-tls"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
dependencies = [
 "bytes",
 "hyper",
 "native-tls",
 "tokio",
 "tokio-native-tls",
]

[[package]]
name = "ident_case"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"

[[package]]
name = "idna"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
dependencies = [
 "unicode-bidi",
 "unicode-normalization",
]

[[package]]
name = "indexmap"
version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
dependencies = [
 "autocfg",
 "hashbrown 0.12.3",
]

[[package]]
name = "indexmap"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
dependencies = [
 "equivalent",
 "hashbrown 0.14.0",
 "serde",
]

[[package]]
name = "indicatif"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7baab56125e25686df467fe470785512329883aab42696d661247aca2a2896e4"
dependencies = [
 "console",
 "lazy_static",
 "number_prefix 0.3.0",
 "regex",
]

[[package]]
name = "indicatif"
version = "0.16.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d207dc617c7a380ab07ff572a6e52fa202a2a8f355860ac9c38e23f8196be1b"
dependencies = [
 "console",
 "lazy_static",
 "number_prefix 0.4.0",
 "regex",
]

[[package]]
name = "inout"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
dependencies = [
 "generic-array",
]

[[package]]
name = "instant"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
 "cfg-if",
]

[[package]]
name = "io-lifetimes"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
dependencies = [
 "hermit-abi",
 "libc",
 "windows-sys 0.48.0",
]

[[package]]
name = "ipnet"
version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6"

[[package]]
name = "is-terminal"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
 "hermit-abi",
 "rustix 0.38.4",
 "windows-sys 0.48.0",
]

[[package]]
name = "itertools"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484"
dependencies = [
 "either",
]

[[package]]
name = "itertools"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
dependencies = [
 "either",
]

[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
 "either",
]

[[package]]
name = "itoa"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a"

[[package]]
name = "jobserver"
version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2"
dependencies = [
 "libc",
]

[[package]]
name = "js-sys"
version = "0.3.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
dependencies = [
 "wasm-bindgen",
]

[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
version = "0.2.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"

[[package]]
name = "libm"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4"

[[package]]
name = "linux-raw-sys"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"

[[package]]
name = "linux-raw-sys"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0"

[[package]]
name = "lock_api"
version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16"
dependencies = [
 "autocfg",
 "scopeguard",
]

[[package]]
name = "log"
version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"

[[package]]
name = "mach2"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8"
dependencies = [
 "libc",
]

[[package]]
name = "macro_rules_attribute"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf0c9b980bf4f3a37fd7b1c066941dd1b1d0152ce6ee6e8fe8c49b9f6810d862"
dependencies = [
 "macro_rules_attribute-proc_macro",
 "paste",
]

[[package]]
name = "macro_rules_attribute-proc_macro"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58093314a45e00c77d5c508f76e77c3396afbbc0d01506e7fae47b018bac2b1d"

[[package]]
name = "match_cfg"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4"

[[package]]
name = "matchers"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
dependencies = [
 "regex-automata 0.1.10",
]

[[package]]
name = "matchit"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40"

[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"

[[package]]
name = "memoffset"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
dependencies = [
 "autocfg",
]

[[package]]
name = "memoffset"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
dependencies = [
 "autocfg",
]

[[package]]
name = "metrics"
version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5"
dependencies = [
 "ahash",
 "metrics-macros",
 "portable-atomic",
]

[[package]]
name = "metrics-exporter-prometheus"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5"
dependencies = [
 "base64 0.21.2",
 "hyper",
 "indexmap 1.9.3",
 "ipnet",
 "metrics",
 "metrics-util",
 "quanta",
 "thiserror",
 "tokio",
 "tracing",
]

[[package]]
name = "metrics-macros"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.25",
]

[[package]]
name = "metrics-util"
version = "0.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e"
dependencies = [
 "crossbeam-epoch",
 "crossbeam-utils",
 "hashbrown 0.13.1",
 "metrics",
 "num_cpus",
 "quanta",
 "sketches-ddsketch",
]

[[package]]
name = "mime"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"

[[package]]
name = "mime_guess"
version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef"
dependencies = [
 "mime",
 "unicase",
]

[[package]]
name = "minimal-lexical"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"

[[package]]
name = "miniz_oxide"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
dependencies = [
 "adler",
]

[[package]]
name = "mio"
version = "0.8.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"
dependencies = [
 "libc",
 "log",
 "wasi",
 "windows-sys 0.48.0",
]

[[package]]
name = "monostate"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f3f57a8802842f648026a33c3d2e3bb41bb309a35b1609bd7ef2b060b8b6b1b"
dependencies = [
 "monostate-impl",
 "serde",
]

[[package]]
name = "monostate-impl"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e72f4d2e10fde62a0f2fcb4b44ccbf4f9899dcc30c9193449f8dfb9123d71377"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.25",
]

[[package]]
name = "multimap"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"

[[package]]
name = "muxado"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e92b89ac3127251efde6f5a9586e5aae99468d06fcf9f133b377f58d5ed66446"
dependencies = [
 "async-trait",
 "awaitdrop",
 "bitflags 1.3.2",
 "bytes",
 "futures",
 "pin-project",
 "rand",
 "thiserror",
 "tokio",
 "tokio-util",
 "tracing",
]

[[package]]
name = "nanorand"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3"
dependencies = [
 "getrandom",
]

[[package]]
name = "native-tls"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e"
dependencies = [
 "lazy_static",
 "libc",
 "log",
 "openssl",
 "openssl-probe",
 "openssl-sys",
 "schannel",
 "security-framework",
 "security-framework-sys",
 "tempfile",
]

[[package]]
name = "ngrok"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87e211f407b0a084f720823a00c956aeab2c15dfe7a61760d93227bbaf048026"
dependencies = [
 "arc-swap",
 "async-rustls",
 "async-trait",
 "awaitdrop",
 "axum",
 "base64 0.13.1",
 "bytes",
 "futures",
 "hostname",
 "hyper",
 "muxado",
 "once_cell",
 "parking_lot",
 "regex",
 "rustls-pemfile",
 "serde",
 "serde_json",
 "thiserror",
 "tokio",
 "tokio-retry",
 "tokio-util",
 "tracing",
 "windows-sys 0.45.0",
]

[[package]]
name = "nix"
version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a"
dependencies = [
 "bitflags 1.3.2",
 "cfg-if",
 "libc",
 "memoffset 0.7.1",
 "pin-utils",
 "static_assertions",
]

[[package]]
name = "nohash-hasher"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451"

[[package]]
name = "nom"
version = "7.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
dependencies = [
 "memchr",
 "minimal-lexical",
]

[[package]]
name = "ntapi"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4"
dependencies = [
 "winapi",
]

[[package]]
name = "nu-ansi-term"
version = "0.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
dependencies = [
 "overload",
 "winapi",
]

[[package]]
name = "num-traits"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
dependencies = [
 "autocfg",
 "libm",
]

[[package]]
name = "num_cpus"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
dependencies = [
 "hermit-abi",
 "libc",
]

[[package]]
name = "num_threads"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44"
dependencies = [
 "libc",
]

[[package]]
name = "number_prefix"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17b02fc0ff9a9e4b35b3342880f48e896ebf69f2967921fe8646bf5b7125956a"

[[package]]
name = "number_prefix"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"

[[package]]
name = "object"
version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1"
dependencies = [
 "memchr",
]

[[package]]
name = "once_cell"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"

[[package]]
name = "onig"
version = "6.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c4b31c8722ad9171c6d77d3557db078cab2bd50afcc9d09c8b315c59df8ca4f"
dependencies = [
 "bitflags 1.3.2",
 "libc",
 "once_cell",
 "onig_sys",
]

[[package]]
name = "onig_sys"
version = "69.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b829e3d7e9cc74c7e315ee8edb185bf4190da5acde74afd7fc59c35b1f086e7"
dependencies = [
 "cc",
 "pkg-config",
]

[[package]]
name = "openssl"
version = "0.10.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d"
dependencies = [
 "bitflags 1.3.2",
 "cfg-if",
 "foreign-types",
 "libc",
 "once_cell",
 "openssl-macros",
 "openssl-sys",
]

[[package]]
name = "openssl-macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.25",
]

[[package]]
name = "openssl-probe"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"

[[package]]
name = "openssl-sys"
version = "0.9.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6"
dependencies = [
 "cc",
 "libc",
 "pkg-config",
 "vcpkg",
]

[[package]]
name = "opentelemetry"
version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69d6c3d7288a106c0a363e4b0e8d308058d56902adefb16f4936f417ffef086e"
dependencies = [
 "opentelemetry_api 0.18.0",
 "opentelemetry_sdk 0.18.0",
]

[[package]]
name = "opentelemetry"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f4b8347cc26099d3aeee044065ecc3ae11469796b4d65d065a23a584ed92a6f"
dependencies = [
 "opentelemetry_api 0.19.0",
 "opentelemetry_sdk 0.19.0",
]

[[package]]
name = "opentelemetry-otlp"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8af72d59a4484654ea8eb183fea5ae4eb6a41d7ac3e3bae5f4d2a282a3a7d3ca"
dependencies = [
 "async-trait",
 "futures",
 "futures-util",
 "http",
 "opentelemetry 0.19.0",
 "opentelemetry-proto",
 "prost",
 "thiserror",
 "tokio",
 "tonic 0.8.3",
]

[[package]]
name = "opentelemetry-proto"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "045f8eea8c0fa19f7d48e7bc3128a39c2e5c533d5c61298c548dfefc1064474c"
dependencies = [
 "futures",
 "futures-util",
 "opentelemetry 0.19.0",
 "prost",
 "tonic 0.8.3",
]

[[package]]
name = "opentelemetry_api"
version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22"
dependencies = [
 "fnv",
 "futures-channel",
 "futures-util",
 "indexmap 1.9.3",
 "js-sys",
 "once_cell",
 "pin-project-lite",
 "thiserror",
]

[[package]]
name = "opentelemetry_api"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed41783a5bf567688eb38372f2b7a8530f5a607a4b49d38dd7573236c23ca7e2"
dependencies = [
 "fnv",
 "futures-channel",
 "futures-util",
 "indexmap 1.9.3",
 "once_cell",
 "pin-project-lite",
 "thiserror",
 "urlencoding",
]

[[package]]
name = "opentelemetry_sdk"
version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113"
dependencies = [
 "async-trait",
 "crossbeam-channel",
 "dashmap",
 "fnv",
 "futures-channel",
 "futures-executor",
 "futures-util",
 "once_cell",
 "opentelemetry_api 0.18.0",
 "percent-encoding",
 "rand",
 "thiserror",
 "tokio",
 "tokio-stream",
]

[[package]]
name = "opentelemetry_sdk"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b3a2a91fdbfdd4d212c0dcc2ab540de2c2bcbbd90be17de7a7daf8822d010c1"
dependencies = [
 "async-trait",
 "crossbeam-channel",
 "dashmap",
 "fnv",
 "futures-channel",
 "futures-executor",
 "futures-util",
 "once_cell",
 "opentelemetry_api 0.19.0",
 "percent-encoding",
 "rand",
 "thiserror",
 "tokio",
 "tokio-stream",
]

[[package]]
name = "overload"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"

[[package]]
name = "papergrid"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae7891b22598926e4398790c8fe6447930c72a67d36d983a49d6ce682ce83290"
dependencies = [
 "bytecount",
 "fnv",
 "unicode-width",
]

[[package]]
name = "parking_lot"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
 "lock_api",
 "parking_lot_core",
]

[[package]]
name = "parking_lot_core"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447"
dependencies = [
 "cfg-if",
 "libc",
 "redox_syscall 0.3.5",
 "smallvec",
 "windows-targets 0.48.1",
]

[[package]]
name = "password-hash"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700"
dependencies = [
 "base64ct",
 "rand_core",
 "subtle",
]

[[package]]
name = "paste"
version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4b27ab7be369122c218afc2079489cdcb4b517c0a3fc386ff11e1fedfcc2b35"

[[package]]
name = "pbkdf2"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917"
dependencies = [
 "digest",
 "hmac",
 "password-hash",
 "sha2",
]

[[package]]
name = "percent-encoding"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"

[[package]]
name = "petgraph"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4"
dependencies = [
 "fixedbitset",
 "indexmap 1.9.3",
]

[[package]]
name = "pin-project"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842"
dependencies = [
 "pin-project-internal",
]

[[package]]
name = "pin-project-internal"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.25",
]

[[package]]
name = "pin-project-lite"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57"

[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"

[[package]]
name = "pkg-config"
version = "0.3.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"

[[package]]
name = "portable-atomic"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d220334a184db82b31b83f5ff093e3315280fb2b6bbc032022b2304a509aab7a"

[[package]]
name = "ppv-lite86"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"

[[package]]
name = "prettyplease"
version = "0.1.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86"
dependencies = [
 "proc-macro2",
 "syn 1.0.109",
]

[[package]]
name = "proc-macro-error"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
dependencies = [
 "proc-macro-error-attr",
 "proc-macro2",
 "quote",
 "syn 1.0.109",
 "version_check",
]

[[package]]
name = "proc-macro-error-attr"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
dependencies = [
 "proc-macro2",
 "quote",
 "version_check",
]

[[package]]
name = "proc-macro2"
version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
dependencies = [
 "unicode-ident",
]

[[package]]
name = "prost"
version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd"
dependencies = [
 "bytes",
 "prost-derive",
]

[[package]]
name = "prost-build"
version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270"
dependencies = [
 "bytes",
 "heck",
 "itertools 0.10.5",
 "lazy_static",
 "log",
 "multimap",
 "petgraph",
 "prettyplease",
 "prost",
 "prost-types",
 "regex",
 "syn 1.0.109",
 "tempfile",
 "which",
]

[[package]]
name = "prost-derive"
version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4"
dependencies = [
 "anyhow",
 "itertools 0.10.5",
 "proc-macro2",
 "quote",
 "syn 1.0.109",
]

[[package]]
name = "prost-types"
version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13"
dependencies = [
 "prost",
]

[[package]]
name = "quanta"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab"
dependencies = [
 "crossbeam-utils",
 "libc",
 "mach2",
 "once_cell",
 "raw-cpuid",
 "wasi",
 "web-sys",
 "winapi",
]

[[package]]
name = "quote"
version = "1.0.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
 "libc",
 "rand_chacha",
 "rand_core",
]

[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
 "ppv-lite86",
 "rand_core",
]

[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
 "getrandom",
]

[[package]]
name = "ratatui"
version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcc0d032bccba900ee32151ec0265667535c230169f5a011154cdcd984e16829"
dependencies = [
 "bitflags 1.3.2",
 "cassowary",
 "crossterm",
 "unicode-segmentation",
 "unicode-width",
]

[[package]]
name = "raw-cpuid"
version = "10.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332"
dependencies = [
 "bitflags 1.3.2",
]

[[package]]
name = "rayon"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
dependencies = [
 "either",
 "rayon-core",
]

[[package]]
name = "rayon-cond"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd1259362c9065e5ea39a789ef40b1e3fd934c94beb7b5ab3ac6629d3b5e7cb7"
dependencies = [
 "either",
 "itertools 0.8.2",
 "rayon",
]

[[package]]
name = "rayon-core"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
dependencies = [
 "crossbeam-channel",
 "crossbeam-deque",
 "crossbeam-utils",
 "num_cpus",
]

[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
 "bitflags 1.3.2",
]

[[package]]
name = "redox_syscall"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29"
dependencies = [
 "bitflags 1.3.2",
]

[[package]]
name = "redox_users"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b"
dependencies = [
 "getrandom",
 "redox_syscall 0.2.16",
 "thiserror",
]

[[package]]
name = "regex"
version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575"
dependencies = [
 "aho-corasick 1.0.2",
 "memchr",
 "regex-automata 0.3.3",
 "regex-syntax 0.7.4",
]

[[package]]
name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
dependencies = [
 "regex-syntax 0.6.29",
]

[[package]]
name = "regex-automata"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39354c10dd07468c2e73926b23bb9c2caca74c5501e38a35da70406f1d923310"
dependencies = [
 "aho-corasick 1.0.2",
 "memchr",
 "regex-syntax 0.7.4",
]

[[package]]
name = "regex-syntax"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"

[[package]]
name = "regex-syntax"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2"

[[package]]
name = "reqwest"
version = "0.11.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55"
dependencies = [
 "base64 0.21.2",
 "bytes",
 "encoding_rs",
 "futures-core",
 "futures-util",
 "h2",
 "http",
 "http-body",
 "hyper",
 "hyper-tls",
 "ipnet",
 "js-sys",
 "log",
 "mime",
 "native-tls",
 "once_cell",
 "percent-encoding",
 "pin-project-lite",
 "serde",
 "serde_json",
 "serde_urlencoded",
 "tokio",
 "tokio-native-tls",
 "tower-service",
 "url",
 "wasm-bindgen",
 "wasm-bindgen-futures",
 "web-sys",
 "winreg",
]

[[package]]
name = "ring"
version = "0.16.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc"
dependencies = [
 "cc",
 "libc",
 "once_cell",
 "spin 0.5.2",
 "untrusted",
 "web-sys",
 "winapi",
]

[[package]]
name = "rust-embed"
version = "6.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a36224c3276f8c4ebc8c20f158eca7ca4359c8db89991c4925132aaaf6702661"
dependencies = [
 "rust-embed-impl",
 "rust-embed-utils",
 "walkdir",
]

[[package]]
name = "rust-embed-impl"
version = "6.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49b94b81e5b2c284684141a2fb9e2a31be90638caf040bf9afbc5a0416afe1ac"
dependencies = [
 "proc-macro2",
 "quote",
 "rust-embed-utils",
 "shellexpand",
 "syn 2.0.25",
 "walkdir",
]

[[package]]
name = "rust-embed-utils"
version = "7.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d38ff6bf570dc3bb7100fce9f7b60c33fa71d80e88da3f2580df4ff2bdded74"
dependencies = [
 "sha2",
 "walkdir",
]

[[package]]
name = "rustc-demangle"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"

[[package]]
name = "rustc_version"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [
 "semver",
]

[[package]]
name = "rustix"
version = "0.37.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06"
dependencies = [
 "bitflags 1.3.2",
 "errno",
 "io-lifetimes",
 "libc",
 "linux-raw-sys 0.3.8",
 "windows-sys 0.48.0",
]

[[package]]
name = "rustix"
version = "0.38.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5"
dependencies = [
 "bitflags 2.3.3",
 "errno",
 "libc",
 "linux-raw-sys 0.4.3",
 "windows-sys 0.48.0",
]

[[package]]
name = "rustls"
version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f"
dependencies = [
 "log",
 "ring",
 "sct",
 "webpki",
]

[[package]]
name = "rustls-pemfile"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2"
dependencies = [
 "base64 0.21.2",
]

[[package]]
name = "rustversion"
version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f"

[[package]]
name = "ryu"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe232bdf6be8c8de797b22184ee71118d63780ea42ac85b61d1baa6d3b782ae9"

[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
 "winapi-util",
]

[[package]]
name = "schannel"
version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88"
dependencies = [
 "windows-sys 0.48.0",
]

[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

[[package]]
name = "sct"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4"
dependencies = [
 "ring",
 "untrusted",
]

[[package]]
name = "security-framework"
version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8"
dependencies = [
 "bitflags 1.3.2",
 "core-foundation",
 "core-foundation-sys",
 "libc",
 "security-framework-sys",
]

[[package]]
name = "security-framework-sys"
version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7"
dependencies = [
 "core-foundation-sys",
 "libc",
]

[[package]]
name = "semver"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed"

[[package]]
name = "serde"
version = "1.0.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.25",
]

[[package]]
name = "serde_json"
version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5062a995d481b2308b6064e9af76011f2921c35f97b0468811ed9f6cd91dfed"
dependencies = [
 "itoa",
 "ryu",
 "serde",
]

[[package]]
name = "serde_path_to_error"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8acc4422959dd87a76cb117c191dcbffc20467f06c9100b76721dab370f24d3a"
dependencies = [
 "itoa",
 "serde",
]

[[package]]
name = "serde_urlencoded"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
dependencies = [
 "form_urlencoded",
 "itoa",
 "ryu",
 "serde",
]

[[package]]
name = "sha1"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3"
dependencies = [
 "cfg-if",
 "cpufeatures",
 "digest",
]

[[package]]
name = "sha2"
version = "0.10.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8"
dependencies = [
 "cfg-if",
 "cpufeatures",
 "digest",
]

[[package]]
name = "sharded-slab"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31"
dependencies = [
 "lazy_static",
]

[[package]]
name = "shellexpand"
version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ccc8076840c4da029af4f87e4e8daeb0fca6b87bbb02e10cb60b791450e11e4"
dependencies = [
 "dirs",
]

[[package]]
name = "signal-hook"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9"
dependencies = [
 "libc",
 "signal-hook-registry",
]

[[package]]
name = "signal-hook-mio"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af"
dependencies = [
 "libc",
 "mio",
 "signal-hook",
]

[[package]]
name = "signal-hook-registry"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
dependencies = [
 "libc",
]

[[package]]
name = "sketches-ddsketch"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1"

[[package]]
name = "slab"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d"
dependencies = [
 "autocfg",
]

[[package]]
name = "slotmap"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1e08e261d0e8f5c43123b7adf3e4ca1690d655377ac93a03b2c9d3e98de1342"
dependencies = [
 "version_check",
]

[[package]]
name = "smallvec"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"

[[package]]
name = "socket2"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662"
dependencies = [
 "libc",
 "winapi",
]

[[package]]
name = "spin"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"

[[package]]
name = "spin"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
dependencies = [
 "lock_api",
]

[[package]]
name = "spm_precompiled"
version = "0.1.4"
source =
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5851699c4033c63636f7ea4cf7b7c1f1bf06d0cc03cfb42e711de5a5c46cf326" dependencies = [ "base64 0.13.1", "nom", "serde", "unicode-segmentation", ] [[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "subtle" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "syn" version = "2.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "sync_wrapper" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sysinfo" version = "0.29.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "751e810399bba86e9326f5762b7f32ac5a085542df78da6a78d94e07d14d7c11" dependencies = [ "cfg-if", "core-foundation-sys", "libc", "ntapi", "once_cell", "winapi", ] [[package]] name = "tabled" version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce69a5028cd9576063ec1f48edb2c75339fd835e6094ef3e05b3a079bf594a6" dependencies = [ "papergrid", "tabled_derive", "unicode-width", ] [[package]] name = "tabled_derive" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99f688a08b54f4f02f0a3c382aefdb7884d3d69609f785bd253dc033243e3fe4" dependencies = [ "heck", "proc-macro-error", "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "tar" version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec96d2ffad078296368d46ff1cb309be1c23c513b4ab0e22a45de0185275ac96" dependencies = [ "filetime", "libc", "xattr", ] [[package]] name = "tempfile" version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ "autocfg", "cfg-if", "fastrand", "redox_syscall 0.3.5", "rustix 0.37.23", "windows-sys 0.48.0", ] [[package]] name = "text-generation-benchmark" version = "1.0.0" dependencies = [ "average", "clap", "crossterm", "float-ord", "ratatui", "serde", "serde_json", "tabled", "text-generation-client", "thiserror", "tokenizers", "tokio", "tracing", "tracing-subscriber", ] [[package]] name = "text-generation-client" version = "1.0.0" dependencies = [ "futures", "grpc-metadata", "prost", "prost-build", "thiserror", "tokio", "tonic 0.9.2", "tonic-build", "tower", "tracing", ] [[package]] name = "text-generation-launcher" version = "1.0.0" dependencies = [ "clap", "ctrlc", "float_eq", "nix", "reqwest", "serde", "serde_json", "tracing", "tracing-subscriber", "vergen", ] [[package]] name = "text-generation-router" 
version = "1.0.0" dependencies = [ "async-stream", "axum", "axum-tracing-opentelemetry", "clap", "flume", "futures", "metrics", "metrics-exporter-prometheus", "ngrok", "nohash-hasher", "opentelemetry 0.19.0", "opentelemetry-otlp", "rand", "reqwest", "serde", "serde_json", "text-generation-client", "thiserror", "tokenizers", "tokio", "tower-http 0.4.1", "tracing", "tracing-opentelemetry 0.19.0", "tracing-subscriber", "utoipa", "utoipa-swagger-ui", "vergen", ] [[package]] name = "thiserror" version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "thread_local" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ "cfg-if", "once_cell", ] [[package]] name = "time" version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" dependencies = [ "itoa", "libc", "num_threads", "serde", "time-core", "time-macros", ] [[package]] name = "time-core" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" dependencies = [ "time-core", ] [[package]] name = "tinyvec" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokenizers" version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cf49017523bf0bc01c9966f172c5f120bbb7b96cccd1708772dd42e767fb9f5" dependencies = [ "aho-corasick 0.7.20", "cached-path", "clap", "derive_builder", "dirs", "esaxx-rs", "getrandom", "indicatif 0.15.0", "itertools 0.9.0", "lazy_static", "log", "macro_rules_attribute", "monostate", "onig", "paste", "rand", "rayon", "rayon-cond", "regex", "regex-syntax 0.6.29", "reqwest", "serde", "serde_json", "spm_precompiled", "thiserror", "unicode-normalization-alignments", "unicode-segmentation", "unicode_categories", ] [[package]] name = "tokio" version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ "autocfg", "backtrace", "bytes", "libc", "mio", "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-io-timeout" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite", 
"tokio", ] [[package]] name = "tokio-macros" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "tokio-native-tls" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", ] [[package]] name = "tokio-retry" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" dependencies = [ "pin-project", "rand", "tokio", ] [[package]] name = "tokio-stream" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", "tokio", ] [[package]] name = "tokio-util" version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", "pin-project-lite", "tokio", "tracing", ] [[package]] name = "tonic" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" dependencies = [ "async-stream", "async-trait", "axum", "base64 0.13.1", "bytes", "futures-core", "futures-util", "h2", "http", "http-body", "hyper", "hyper-timeout", "percent-encoding", "pin-project", "prost", "prost-derive", "tokio", "tokio-stream", "tokio-util", "tower", "tower-layer", "tower-service", "tracing", "tracing-futures", ] [[package]] name = "tonic" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" dependencies = [ "async-trait", "axum", "base64 0.21.2", "bytes", "futures-core", "futures-util", "h2", "http", "http-body", "hyper", "hyper-timeout", "percent-encoding", "pin-project", "prost", "tokio", "tokio-stream", "tower", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tonic-build" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease", "proc-macro2", "prost-build", "quote", "syn 1.0.109", ] [[package]] name = "tower" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", "indexmap 1.9.3", "pin-project", "pin-project-lite", "rand", "slab", "tokio", "tokio-util", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tower-http" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" dependencies = [ "bitflags 1.3.2", "bytes", "futures-core", "futures-util", "http", "http-body", "http-range-header", "pin-project-lite", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tower-http" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8bd22a874a2d0b70452d5597b12c537331d49060824a95f49f108994f94aa4c" dependencies = [ 
"bitflags 2.3.3", "bytes", "futures-core", "futures-util", "http", "http-body", "http-range-header", "pin-project-lite", "tower-layer", "tower-service", ] [[package]] name = "tower-layer" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "tracing-core" version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", ] [[package]] name = "tracing-futures" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ "pin-project", "tracing", ] [[package]] name = "tracing-log" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ "lazy_static", "log", "tracing-core", ] [[package]] name = "tracing-opentelemetry" version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21ebb87a95ea13271332df069020513ab70bdb5637ca42d6e492dc3bbbad48de" dependencies = [ "once_cell", "opentelemetry 0.18.0", "tracing", "tracing-core", "tracing-log", "tracing-subscriber", ] [[package]] name = "tracing-opentelemetry" version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00a39dcf9bfc1742fa4d6215253b33a6e474be78275884c216fc2a06267b3600" dependencies = [ "once_cell", "opentelemetry 0.19.0", "tracing", "tracing-core", "tracing-log", "tracing-subscriber", ] [[package]] name = "tracing-serde" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ "serde", "tracing-core", ] [[package]] name = "tracing-subscriber" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", "serde", "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", "tracing-serde", ] [[package]] name = "try-lock" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "typenum" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "unicase" version = "2.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" dependencies = [ "version_check", ] [[package]] name = "unicode-bidi" version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73" [[package]] name = "unicode-normalization" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-normalization-alignments" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43f613e4fa046e69818dd287fdc4bc78175ff20331479dab6e1b0f98d57062de" dependencies = [ "smallvec", ] [[package]] name = "unicode-segmentation" version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] name = "unicode-width" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode_categories" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" [[package]] name = "untrusted" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] [[package]] name = "urlencoding" version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" [[package]] name = "utf8parse" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "utoipa" version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520434cac5c98120177d5cc15be032703f6dca7d5ef82e725c798113b375000a" dependencies = [ "indexmap 2.0.0", "serde", "serde_json", "utoipa-gen", ] [[package]] name = "utoipa-gen" version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e22e88a487b6e0374533871b79b1f5ded05671bd0936bd547eb42f82fb9060d" dependencies = [ "proc-macro-error", "proc-macro2", "quote", "regex", "syn 2.0.25", ] [[package]] name = "utoipa-swagger-ui" version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4602d7100d3cfd8a086f30494e68532402ab662fa366c9d201d677e33cee138d" dependencies = [ "axum", "mime_guess", "regex", "rust-embed", "serde", "serde_json", "utoipa", "zip", ] [[package]] name = "valuable" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "vcpkg" version = 
"0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "vergen" version = "8.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbc5ad0d9d26b2c49a5ab7da76c3e79d3ee37e7821799f8223fcb8f2f391a2e7" dependencies = [ "anyhow", "rustc_version", "rustversion", "sysinfo", "time", ] [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", "winapi-util", ] [[package]] name = "want" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ "try-lock", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", "syn 2.0.25", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "webpki" version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ "ring", "untrusted", ] [[package]] name = "which" version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" 
dependencies = [ "either", "libc", "once_cell", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ "windows-targets 0.42.2", ] [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.1", ] [[package]] name = "windows-targets" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-targets" version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ "windows_aarch64_gnullvm 0.48.0", "windows_aarch64_msvc 0.48.0", "windows_i686_gnu 0.48.0", "windows_i686_msvc 0.48.0", "windows_x86_64_gnu 0.48.0", "windows_x86_64_gnullvm 0.48.0", "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = 
"windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winreg" version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" dependencies = [ "winapi", ] [[package]] name = "xattr" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d1526bbe5aaeb5eb06885f4d987bcdfa5e23187055de9b83fe00156a821fabc" dependencies = [ "libc", ] [[package]] name = "zip" version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ "aes", "byteorder", "bzip2", "constant_time_eq", "crc32fast", "crossbeam-utils", "flate2", "hmac", "pbkdf2", "sha1", "time", "zstd", ] [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" version = "5.0.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" dependencies = [ "libc", "zstd-sys", ] [[package]] name = "zstd-sys" version = "2.0.8+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" dependencies = [ "cc", "libc", "pkg-config", ]
0
hf_public_repos
hf_public_repos/text-generation-inference/Cargo.toml
[workspace]
members = [
  "benchmark",
  "router",
  "router/client",
  "router/grpc-metadata",
  "launcher"
]

[workspace.package]
version = "1.0.0"
edition = "2021"
authors = ["Olivier Dehaene"]
homepage = "https://github.com/huggingface/text-generation-inference"

[profile.release]
debug = 1
incremental = true
lto = "off"
panic = "abort"
0
hf_public_repos
hf_public_repos/text-generation-inference/Dockerfile
# Rust builder
FROM lukemathwalker/cargo-chef:latest-rust-1.71 AS chef
WORKDIR /usr/src

ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse

FROM chef as planner
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo chef prepare --recipe-path recipe.json

FROM chef AS builder

ARG GIT_SHA
ARG DOCKER_LABEL

RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \
    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \
    unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \
    unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \
    rm -f $PROTOC_ZIP

COPY --from=planner /usr/src/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json

COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo build --release

# Python builder
# Adapted from: https://github.com/pytorch/pytorch/blob/master/Dockerfile
FROM debian:bullseye-slim as pytorch-install

ARG PYTORCH_VERSION=2.0.1
ARG PYTHON_VERSION=3.9
# Keep in sync with `server/pyproject.toml`
ARG CUDA_VERSION=11.8
ARG MAMBA_VERSION=23.1.0-1
ARG CUDA_CHANNEL=nvidia
ARG INSTALL_CHANNEL=pytorch
# Automatically set by buildx
ARG TARGETPLATFORM

ENV PATH /opt/conda/bin:$PATH

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    ccache \
    curl \
    git && \
    rm -rf /var/lib/apt/lists/*

# Install conda
# translating Docker's TARGETPLATFORM into mamba arches
RUN case ${TARGETPLATFORM} in \
    "linux/arm64") MAMBA_ARCH=aarch64 ;; \
    *) MAMBA_ARCH=x86_64 ;; \
    esac && \
    curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh"
RUN chmod +x ~/mambaforge.sh && \
    bash ~/mambaforge.sh -b -p /opt/conda && \
    rm ~/mambaforge.sh

# Install pytorch
# On arm64 we exit with an error code
RUN case ${TARGETPLATFORM} in \
    "linux/arm64") exit 1 ;; \
    *) /opt/conda/bin/conda update -y conda && \
    /opt/conda/bin/conda install -c "${INSTALL_CHANNEL}" -c "${CUDA_CHANNEL}" -y "python=${PYTHON_VERSION}" pytorch==$PYTORCH_VERSION "pytorch-cuda=$(echo $CUDA_VERSION | cut -d'.' -f 1-2)" ;; \
    esac && \
    /opt/conda/bin/conda clean -ya

# CUDA kernels builder image
FROM pytorch-install as kernel-builder

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    ninja-build \
    && rm -rf /var/lib/apt/lists/*

RUN /opt/conda/bin/conda install -c "nvidia/label/cuda-11.8.0" cuda==11.8 && \
    /opt/conda/bin/conda clean -ya

# Build Flash Attention CUDA kernels
FROM kernel-builder as flash-att-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att Makefile
# Build specific version of flash attention
RUN make build-flash-attention

# Build Flash Attention v2 CUDA kernels
FROM kernel-builder as flash-att-v2-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att-v2 Makefile
# Build specific version of flash attention v2
RUN make build-flash-attention-v2

# Build Transformers exllama kernels
FROM kernel-builder as exllama-kernels-builder
WORKDIR /usr/src
COPY server/exllama_kernels/ .
# Build specific version of transformers
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" python setup.py build

# Build Transformers CUDA kernels
FROM kernel-builder as custom-kernels-builder
WORKDIR /usr/src
COPY server/custom_kernels/ .
# Build specific version of transformers
RUN python setup.py build

# Build vllm CUDA kernels
FROM kernel-builder as vllm-builder
WORKDIR /usr/src
COPY server/Makefile-vllm Makefile
# Build specific version of vllm
RUN make build-vllm

# Text Generation Inference base image
FROM nvidia/cuda:11.8.0-base-ubuntu20.04 as base

# Conda env
ENV PATH=/opt/conda/bin:$PATH \
    CONDA_PREFIX=/opt/conda

# Text Generation Inference base env
ENV HUGGINGFACE_HUB_CACHE=/data \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    PORT=80

WORKDIR /usr/src

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    libssl-dev \
    ca-certificates \
    make \
    && rm -rf /var/lib/apt/lists/*

# Copy conda with PyTorch installed
COPY --from=pytorch-install /opt/conda /opt/conda

# Copy build artifacts from flash attention builder
COPY --from=flash-att-builder /usr/src/flash-attention/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/layer_norm/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/rotary/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages

# Copy build artifacts from flash attention v2 builder
COPY --from=flash-att-v2-builder /usr/src/flash-attention-v2/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages

# Copy build artifacts from custom kernels builder
COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages
# Copy build artifacts from exllama kernels builder
COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages

# Copy build artifacts from vllm builder
COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages

# Install flash-attention dependencies
RUN pip install einops --no-cache-dir

# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
RUN cd server && \
    make gen-server && \
    pip install -r requirements.txt && \
    pip install ".[bnb, accelerate, quantize]" --no-cache-dir

# Install benchmarker
COPY --from=builder /usr/src/target/release/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release/text-generation-launcher /usr/local/bin/text-generation-launcher

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    build-essential \
    g++ \
    && rm -rf /var/lib/apt/lists/*

# AWS Sagemaker compatible image
FROM base as sagemaker
COPY sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]

# Final image
FROM base

ENTRYPOINT ["text-generation-launcher"]
CMD ["--json-output"]
0
hf_public_repos
hf_public_repos/text-generation-inference/LICENSE
Hugging Face Optimized Inference License 1.0 (HFOILv1.0)

This License Agreement governs the use of the Software and its Modifications. It is a binding agreement between the Licensor and You.

This License Agreement shall be referred to as Hugging Face Optimized Inference License 1.0 or HFOILv1.0. We may publish revised versions of this License Agreement from time to time. Each version will be given a distinguished number.

By downloading, accessing, modifying, distributing or otherwise using the Software, You consent to all of the terms and conditions below. So, if You do not agree with those, please do not download, access, modify, distribute, or use the Software.

1. PERMISSIONS

You may use, modify and distribute the Software pursuant to the following terms and conditions:

Copyright License. Subject to the terms and conditions of this License Agreement and where and as applicable, each Contributor hereby grants You a perpetual, worldwide, non-exclusive, royalty-free, copyright license to reproduce, prepare, publicly display, publicly perform, sublicense under the terms herein, and distribute the Software and Modifications of the Software.

Patent License. Subject to the terms and conditions of this License Agreement and where and as applicable, each Contributor hereby grants You a perpetual, worldwide, non-exclusive, royalty-free patent license to make, have made, Use, offer to sell, sell, import, and otherwise transfer the Software, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Software to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Software or a Contribution incorporated within the Software constitutes direct or contributory patent infringement, then any rights granted to You under this License Agreement for the Software shall terminate as of the date such litigation is filed.

No other rights. All rights not expressly granted herein are retained.

2. RESTRICTIONS

You may not distribute the Software as a hosted or managed, and paid service, where the service grants users access to any substantial set of the features or functionality of the Software. If you wish to do so, You will need to be granted additional rights from the Licensor which will be subject to a separate mutually agreed agreement.

You may not sublicense the Software under any other terms than those listed in this License.

3. OBLIGATIONS

When You modify the Software, You agree to:
- attach a notice stating the Modifications of the Software You made; and
- attach a notice stating that the Modifications of the Software are released under this License Agreement.

When You distribute the Software or Modifications of the Software, You agree to:
- give any recipients of the Software a copy of this License Agreement;
- retain all Explanatory Documentation; and if sharing the Modifications of the Software, add Explanatory Documentation documenting the changes made to create the Modifications of the Software;
- retain all copyright, patent, trademark and attribution notices.

4. MISCELLANEOUS

Termination. Licensor reserves the right to restrict Use of the Software in violation of this License Agreement, upon which Your licenses will automatically terminate.

Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Software by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

Trademarks and related. Nothing in this License Agreement permits You (i) to make Use of Licensors’ trademarks, trade names, or logos, (ii) otherwise suggest endorsement by Licensor, or (iii) misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.

Output You generate. Licensor claims no rights in the Output. You agree not to contravene any provision as stated in the License Agreement with your Use of the Output.

Disclaimer of Warranty. Except as expressly provided otherwise herein, and to the fullest extent permitted by law, Licensor provides the Software (and each Contributor provides its Contributions) AS IS, and Licensor disclaims all warranties or guarantees of any kind, express or implied, whether arising under any law or from any usage in trade, or otherwise including but not limited to the implied warranties of merchantability, non-infringement, quiet enjoyment, fitness for a particular purpose, or otherwise. You are solely responsible for determining the appropriateness of the Software and Modifications of the Software for your purposes (including your use or distribution of the Software and Modifications of the Software), and assume any risks associated with Your exercise of permissions under this License Agreement.

Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License Agreement or out of the Use or inability to Use the Software (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, model failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

Accepting Warranty or Additional Liability. While sharing the Software or Modifications of the Software thereof, You may choose to offer and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License Agreement. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of Licensor or any other Contributor, and you hereby agree to indemnify, defend, and hold Licensor and each other Contributor (and their successors or assigns) harmless for any liability incurred by, or claims asserted against, such Licensor or Contributor (and their successors or assigns) by reason of your accepting any such warranty or additional liability.

Severability. This License Agreement is a license of copyright and patent rights and an agreement in contract between You and the Licensor.
If any provision of this License Agreement is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.

5. DEFINITIONS

“Contribution” refers to any work of authorship, including the original version of the Software and any Modifications of the Software that is intentionally submitted to Licensor for inclusion in the Software by the copyright owner or by an individual or entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, “submitted” means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Software, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as “Not a Contribution.”

“Contributor” refers to Licensor and any individual or entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Software.

“Data” refers to a collection of information extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License Agreement.

“Explanatory Documentation” refers to any documentation or related information including but not limited to model cards or data cards dedicated to inform the public about the characteristics of the Software. Explanatory documentation is not licensed under this License.

"License Agreement" refers to these terms and conditions.

“Licensor” refers to the rights owners or entity authorized by the rights owners that are granting the terms and conditions of this License Agreement.

“Model” refers to machine-learning based assemblies (including checkpoints), consisting of learnt weights and parameters (including optimizer states), corresponding to a model architecture as embodied in Software source code. Source code is not licensed under this License Agreement.

“Modifications of the Software” refers to all changes to the Software, including without limitation derivative works of the Software.

“Output” refers to the results of operating the Software.

“Share” refers to any transmission, reproduction, publication or other sharing of the Software or Modifications of the Software to a third party, including providing the Software as a hosted service made available by electronic or other remote means, including - but not limited to - API-based or web access.

“Software” refers to the software and Model (or parts of either) that Licensor makes available under this License Agreement.

“Third Parties” refers to individuals or legal entities that are not under common control with Licensor or You.

“Use” refers to anything You or your representatives do with the Software, including but not limited to generating any Output, fine tuning, updating, running, training, evaluating and/or reparametrizing the Model.

"You" (or "Your") refers to an individual or Legal Entity exercising permissions granted by this License Agreement and/or making Use of the Software for whichever purpose and in any field of Use.
0
hf_public_repos
hf_public_repos/text-generation-inference/Makefile
install-server:
	cd server && make install

install-custom-kernels:
	if [ "$$BUILD_EXTENSIONS" = "True" ]; then cd server/custom_kernels && python setup.py install; else echo "Custom kernels are disabled, you need to set the BUILD_EXTENSIONS environment variable to 'True' in order to build them. (Please read the docs, kernels might not work on all hardware)"; fi

install-integration-tests:
	cd integration-tests && pip install -r requirements.txt
	cd clients/python && pip install .

install-router:
	cd router && cargo install --path .

install-launcher:
	cd launcher && cargo install --path .

install-benchmark:
	cd benchmark && cargo install --path .

install: install-server install-router install-launcher install-custom-kernels

server-dev:
	cd server && make run-dev

router-dev:
	cd router && cargo run -- --port 8080

rust-tests: install-router install-launcher
	cargo test

integration-tests: install-integration-tests
	pytest -s -vv -m "not private" integration-tests

update-integration-tests: install-integration-tests
	pytest -s -vv --snapshot-update integration-tests

python-server-tests:
	HF_HUB_ENABLE_HF_TRANSFER=1 pytest -s -vv -m "not private" server/tests

python-client-tests:
	pytest clients/python/tests

python-tests: python-server-tests python-client-tests

run-falcon-7b-instruct:
	text-generation-launcher --model-id tiiuae/falcon-7b-instruct --port 8080

run-falcon-7b-instruct-quantize:
	text-generation-launcher --model-id tiiuae/falcon-7b-instruct --quantize bitsandbytes --port 8080

clean:
	rm -rf target aml
0
hf_public_repos
hf_public_repos/text-generation-inference/README.md
<div align="center">

![image](https://github.com/huggingface/text-generation-inference/assets/3841370/38ba1531-ea0d-4851-b31a-a6d4ddc944b0)

# Text Generation Inference

<a href="https://github.com/huggingface/text-generation-inference">
  <img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/huggingface/text-generation-inference?style=social">
</a>
<a href="https://huggingface.github.io/text-generation-inference">
  <img alt="Swagger API documentation" src="https://img.shields.io/badge/API-Swagger-informational">
</a>

A Rust, Python and gRPC server for text generation inference. Used in production at [HuggingFace](https://huggingface.co) to power Hugging Chat, the Inference API and Inference Endpoints.

</div>

## Table of contents

- [Features](#features)
- [Optimized Architectures](#optimized-architectures)
- [Get Started](#get-started)
  - [Docker](#docker)
  - [API Documentation](#api-documentation)
  - [Using a private or gated model](#using-a-private-or-gated-model)
  - [A note on Shared Memory](#a-note-on-shared-memory-shm)
  - [Distributed Tracing](#distributed-tracing)
  - [Local Install](#local-install)
  - [CUDA Kernels](#cuda-kernels)
- [Run Falcon](#run-falcon)
  - [Run](#run)
  - [Quantization](#quantization)
- [Develop](#develop)
- [Testing](#testing)
- [Other supported hardware](#other-supported-hardware)

## Features

- Serve the most popular Large Language Models with a simple launcher
- Tensor Parallelism for faster inference on multiple GPUs
- Token streaming using Server-Sent Events (SSE)
- [Continuous batching of incoming requests](https://github.com/huggingface/text-generation-inference/tree/main/router) for increased total throughput
- Optimized transformers code for inference using [flash-attention](https://github.com/HazyResearch/flash-attention) and [Paged Attention](https://github.com/vllm-project/vllm) on the most popular architectures
- Quantization with [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) and [GPT-Q](https://arxiv.org/abs/2210.17323)
- [Safetensors](https://github.com/huggingface/safetensors) weight loading
- Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
- Logits warper (temperature scaling, top-p, top-k, repetition penalty; for more details see [transformers.LogitsProcessor](https://huggingface.co/docs/transformers/internal/generation_utils#transformers.LogitsProcessor))
- Stop sequences
- Log probabilities
- Production ready (distributed tracing with Open Telemetry, Prometheus metrics)

## Optimized architectures

- [BLOOM](https://huggingface.co/bigscience/bloom)
- [FLAN-T5](https://huggingface.co/google/flan-t5-xxl)
- [Galactica](https://huggingface.co/facebook/galactica-120b)
- [GPT-Neox](https://huggingface.co/EleutherAI/gpt-neox-20b)
- [Llama](https://github.com/facebookresearch/llama)
- [OPT](https://huggingface.co/facebook/opt-66b)
- [SantaCoder](https://huggingface.co/bigcode/santacoder)
- [Starcoder](https://huggingface.co/bigcode/starcoder)
- [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b)
- [Falcon 40B](https://huggingface.co/tiiuae/falcon-40b)
- [MPT](https://huggingface.co/mosaicml/mpt-30b)
- [Llama V2](https://huggingface.co/meta-llama)

Other architectures are supported on a best-effort basis using `AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")` or `AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")`, as sketched below.
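As a minimal sketch of that fallback (the model id is purely illustrative; any Hub model whose architecture is not in the list above takes the same path):

```shell
# gpt2 is not one of the optimized architectures listed above, so the
# launcher serves it through the best-effort AutoModelForCausalLM path.
text-generation-launcher --model-id gpt2 --port 8080
```

The invocation is the same as for an optimized model; the fallback is selected automatically.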
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run

docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.0.0 --model-id $model
```

**Note:** To use GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 11.8 or higher.

To see all options to serve your models (in the [code](https://github.com/huggingface/text-generation-inference/blob/main/launcher/src/main.rs) or in the CLI):

```
text-generation-launcher --help
```

You can then query the model using either the `/generate` or `/generate_stream` routes:

```shell
curl 127.0.0.1:8080/generate \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
    -H 'Content-Type: application/json'
```

```shell
curl 127.0.0.1:8080/generate_stream \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
    -H 'Content-Type: application/json'
```

or from Python:

```shell
pip install text-generation
```

```python
from text_generation import Client

client = Client("http://127.0.0.1:8080")
print(client.generate("What is Deep Learning?", max_new_tokens=20).generated_text)

text = ""
for response in client.generate_stream("What is Deep Learning?", max_new_tokens=20):
    if not response.token.special:
        text += response.token.text
print(text)
```

### API documentation

You can consult the OpenAPI documentation of the `text-generation-inference` REST API using the `/docs` route.
The Swagger UI is also available at: [https://huggingface.github.io/text-generation-inference](https://huggingface.github.io/text-generation-inference).

### Using a private or gated model

You can use the `HUGGING_FACE_HUB_TOKEN` environment variable to configure the token used by
`text-generation-inference`, giving it access to protected resources.

For example, if you want to serve the gated Llama V2 model variants:

1. Go to https://huggingface.co/settings/tokens
2. Copy your cli READ token
3. Export `HUGGING_FACE_HUB_TOKEN=<your cli READ token>`

or with Docker:

```shell
model=meta-llama/Llama-2-7b-chat-hf
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
token=<your cli READ token>

docker run --gpus all --shm-size 1g -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.0.0 --model-id $model
```

### A note on Shared Memory (shm)

[`NCCL`](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html) is a communication framework used by
`PyTorch` to do distributed training/inference. `text-generation-inference` makes use of `NCCL` to enable Tensor
Parallelism to dramatically speed up inference for large language models.

In order to share data between the different devices of a `NCCL` group, `NCCL` might fall back to using the host memory
if peer-to-peer using NVLink or PCI is not possible.

To allow the container to use 1G of Shared Memory and support SHM sharing, we add `--shm-size 1g` on the above command.

If you are running `text-generation-inference` inside `Kubernetes`, you can also add Shared Memory to the container by
creating a volume with:

```yaml
- name: shm
  emptyDir:
    medium: Memory
    sizeLimit: 1Gi
```

and mounting it to `/dev/shm`.

Finally, you can also disable SHM sharing by using the `NCCL_SHM_DISABLE=1` environment variable.
However, note that this will impact performance.

### Distributed Tracing

`text-generation-inference` is instrumented with distributed tracing using OpenTelemetry. You can use this feature
by setting the address to an OTLP collector with the `--otlp-endpoint` argument.

### Local install

You can also opt to install `text-generation-inference` locally.

First [install Rust](https://rustup.rs/) and create a Python virtual environment with at least
Python 3.9, e.g. using `conda`:

```shell
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

conda create -n text-generation-inference python=3.9
conda activate text-generation-inference
```

You may also need to install Protoc.

On Linux:

```shell
PROTOC_ZIP=protoc-21.12-linux-x86_64.zip
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP
sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
rm -f $PROTOC_ZIP
```

On MacOS, using Homebrew:

```shell
brew install protobuf
```

Then run:

```shell
BUILD_EXTENSIONS=True make install # Install repository and HF/transformer fork with CUDA kernels
make run-falcon-7b-instruct
```

**Note:** on some machines, you may also need the OpenSSL libraries and gcc. On Linux machines, run:

```shell
sudo apt-get install libssl-dev gcc -y
```

### CUDA Kernels

The custom CUDA kernels are only tested on NVIDIA A100s. If you have any installation or runtime issues, you can remove
the kernels by using the `DISABLE_CUSTOM_KERNELS=True` environment variable.

Be aware that the official Docker image has them enabled by default.

## Run Falcon

### Run

```shell
make run-falcon-7b-instruct
```

### Quantization

You can also quantize the weights with bitsandbytes to reduce the VRAM requirement:

```shell
make run-falcon-7b-instruct-quantize
```

4-bit quantization is available using the [NF4 and FP4 data types from bitsandbytes](https://arxiv.org/pdf/2305.14314.pdf).
It can be enabled by providing `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` as a command line
argument to `text-generation-launcher`. A minimal Docker sketch of this flag is included at the end of this README.

## Develop

```shell
make server-dev
make router-dev
```

## Testing

```shell
# python
make python-server-tests
make python-client-tests
# or both server and client tests
make python-tests
# rust cargo tests
make rust-tests
# integration tests
make integration-tests
```

## Other supported hardware

TGI is also supported on the following AI hardware accelerators:
- *Habana first-gen Gaudi and Gaudi2:* check out [here](https://github.com/huggingface/optimum-habana/tree/main/text-generation-inference) how to serve models with TGI on Gaudi and Gaudi2 with [Optimum Habana](https://huggingface.co/docs/optimum/habana/index)
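As referenced in the [Quantization](#quantization) section, here is a hedged minimal sketch of enabling 4-bit NF4
quantization with the Docker image. It reuses the same setup as [Get started](#get-started); the model and volume are
just the examples used above:

```shell
model=tiiuae/falcon-7b-instruct
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run

docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
    ghcr.io/huggingface/text-generation-inference:1.0.0 \
    --model-id $model --quantize bitsandbytes-nf4
```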
0
hf_public_repos
hf_public_repos/text-generation-inference/rust-toolchain.toml
[toolchain]
channel = "1.70.0"
components = ["rustfmt", "clippy"]
0
hf_public_repos
hf_public_repos/text-generation-inference/sagemaker-entrypoint.sh
#!/bin/bash

if [[ -z "${HF_MODEL_ID}" ]]; then
  echo "HF_MODEL_ID must be set"
  exit 1
fi
export MODEL_ID="${HF_MODEL_ID}"

if [[ -n "${HF_MODEL_REVISION}" ]]; then
  export REVISION="${HF_MODEL_REVISION}"
fi

if [[ -n "${SM_NUM_GPUS}" ]]; then
  export NUM_SHARD="${SM_NUM_GPUS}"
fi

if [[ -n "${HF_MODEL_QUANTIZE}" ]]; then
  export QUANTIZE="${HF_MODEL_QUANTIZE}"
fi

if [[ -n "${HF_MODEL_TRUST_REMOTE_CODE}" ]]; then
  export TRUST_REMOTE_CODE="${HF_MODEL_TRUST_REMOTE_CODE}"
fi

text-generation-launcher --port 8080
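# Hedged usage sketch (added for illustration; not part of the original
# script). The HF_*/SM_* variables above are the only configuration surface:
# they are translated into the launcher's MODEL_ID / REVISION / NUM_SHARD /
# QUANTIZE / TRUST_REMOTE_CODE environment variables. For local testing, the
# same mapping could be exercised like this:
#
#   export HF_MODEL_ID=bigscience/bloom-560m
#   export SM_NUM_GPUS=1                    # becomes NUM_SHARD
#   export HF_MODEL_QUANTIZE=bitsandbytes   # becomes QUANTIZE
#   ./sagemaker-entrypoint.sh               # runs text-generation-launcher --port 8080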
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/benchmark/Cargo.toml
[package]
name = "text-generation-benchmark"
description = "Text Generation Benchmarking tool"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true

[lib]
path = "src/lib.rs"

[[bin]]
name = "text-generation-benchmark"
path = "src/main.rs"

[dependencies]
average = "0.13"
clap = { version = "4.1.4", features = ["derive", "env"] }
crossterm = "0.26"
float-ord = "0.3.2"
serde = { version = "1.0.142", features = ["derive"] }
serde_json = "1.0"
tabled = "0.12.0"
text-generation-client = { path = "../router/client" }
thiserror = "1.0.38"
tokenizers = "0.13.3"
tokio = { version = "1.25.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync"] }
tui = { package = "ratatui", version = "0.20", default-features = false, features = ["crossterm"] }
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.16", features = ["json", "env-filter"] }
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/benchmark/README.md
<div align="center">

# Text Generation Inference benchmarking tool

![benchmark](../assets/benchmark.png)

</div>

A lightweight benchmarking tool inspired by [oha](https://github.com/hatoo/oha)
and powered by [tui](https://github.com/tui-rs-revival/ratatui).

## Install

```shell
make install-benchmark
```

## Run

First, start `text-generation-inference`:

```shell
text-generation-launcher --model-id bigscience/bloom-560m
```

Then run the benchmarking tool:

```shell
text-generation-benchmark --tokenizer-name bigscience/bloom-560m
```
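The defaults can be overridden from the command line. As a hedged sketch (flag names and default values taken from
`src/main.rs`, where `--batch-size` can be repeated to benchmark several batch sizes in one session):

```shell
text-generation-benchmark \
    --tokenizer-name bigscience/bloom-560m \
    --sequence-length 10 \
    --decode-length 8 \
    --runs 10 \
    --batch-size 1 --batch-size 2 --batch-size 4
```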
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/app.rs
/// Inspired by https://github.com/hatoo/oha/blob/bb989ea3cd77727e7743e7daa60a19894bb5e901/src/monitor.rs use crate::generation::{Decode, Message, Prefill}; use crossterm::event::{KeyCode, KeyEvent, KeyModifiers}; use text_generation_client::ClientError; use tokio::sync::mpsc; use tui::backend::Backend; use tui::layout::{Alignment, Constraint, Direction, Layout}; use tui::style::{Color, Modifier, Style}; use tui::text::{Span, Spans}; use tui::widgets::{ Axis, BarChart, Block, Borders, Chart, Dataset, Gauge, GraphType, Paragraph, Tabs, }; use tui::{symbols, Frame}; /// TUI powered App pub(crate) struct App { pub(crate) running: bool, pub(crate) data: Data, completed_runs: Vec<usize>, completed_batch: usize, current_batch: usize, current_tab: usize, touched_tab: bool, zoom: bool, is_error: bool, tokenizer_name: String, sequence_length: u32, decode_length: u32, n_run: usize, receiver: mpsc::Receiver<Result<Message, ClientError>>, } impl App { pub(crate) fn new( receiver: mpsc::Receiver<Result<Message, ClientError>>, tokenizer_name: String, sequence_length: u32, decode_length: u32, n_run: usize, batch_size: Vec<u32>, ) -> Self { let current_tab = 0; let completed_runs: Vec<usize> = (0..batch_size.len()).map(|_| 0).collect(); let completed_batch = 0; let current_batch = 0; let is_error = false; let data = Data::new(n_run, batch_size); Self { running: true, data, completed_runs, completed_batch, current_batch, current_tab, touched_tab: false, zoom: false, is_error, tokenizer_name, sequence_length, decode_length, n_run, receiver, } } /// Handle crossterm key events pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) { match key_event { // Increase and wrap tab KeyEvent { code: KeyCode::Right, .. } | KeyEvent { code: KeyCode::Tab, .. } => { self.touched_tab = true; self.current_tab = (self.current_tab + 1) % self.data.batch_size.len(); } // Decrease and wrap tab KeyEvent { code: KeyCode::Left, .. } => { self.touched_tab = true; if self.current_tab > 0 { self.current_tab -= 1; } else { self.current_tab = self.data.batch_size.len() - 1; } } // Zoom on throughput/latency fig KeyEvent { code: KeyCode::Char('+'), .. } => { self.zoom = true; } // Unzoom on throughput/latency fig KeyEvent { code: KeyCode::Char('-'), .. } => { self.zoom = false; } // Quit KeyEvent { code: KeyCode::Char('q'), .. } | KeyEvent { code: KeyCode::Char('c'), modifiers: KeyModifiers::CONTROL, .. 
} => { self.running = false; } _ => (), } } /// Get all pending messages from generation task pub(crate) fn tick(&mut self) { while let Ok(message) = self.receiver.try_recv() { match message { Ok(message) => match message { Message::Prefill(step) => self.data.push_prefill(step, self.current_batch), Message::Decode(step) => self.data.push_decode(step, self.current_batch), Message::EndRun => { self.completed_runs[self.current_batch] += 1; } Message::EndBatch => { self.data.end_batch(self.current_batch); self.completed_batch += 1; if self.current_batch < self.data.batch_size.len() - 1 { // Only go to next tab if the user never touched the tab keys if !self.touched_tab { self.current_tab += 1; } self.current_batch += 1; } } Message::Warmup => {} }, Err(_) => self.is_error = true, } } } /// Render frame pub fn render<B: Backend>(&mut self, f: &mut Frame<'_, B>) { let batch_progress = (self.completed_batch as f64 / self.data.batch_size.len() as f64).clamp(0.0, 1.0); let run_progress = (self.completed_runs[self.current_batch] as f64 / self.n_run as f64).clamp(0.0, 1.0); // Vertical layout let row5 = Layout::default() .direction(Direction::Vertical) .constraints( [ Constraint::Length(1), Constraint::Length(3), Constraint::Length(3), Constraint::Length(13), Constraint::Min(10), ] .as_ref(), ) .split(f.size()); // Top row horizontal layout let top = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(row5[2]); // Mid row horizontal layout let mid = Layout::default() .direction(Direction::Horizontal) .constraints( [ Constraint::Percentage(25), Constraint::Percentage(25), Constraint::Percentage(25), Constraint::Percentage(25), ] .as_ref(), ) .split(row5[3]); // Left mid row vertical layout let prefill_text = Layout::default() .direction(Direction::Vertical) .constraints([Constraint::Length(8), Constraint::Length(5)].as_ref()) .split(mid[0]); // Right mid row vertical layout let decode_text = Layout::default() .direction(Direction::Vertical) .constraints([Constraint::Length(8), Constraint::Length(5)].as_ref()) .split(mid[2]); let decode_text_latency = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(decode_text[0]); // Bottom row horizontal layout let bottom = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(row5[4]); // Title let title = Block::default() .borders(Borders::NONE) .title(format!( "Model: {} | Sequence Length: {} | Decode Length: {}", self.tokenizer_name, self.sequence_length, self.decode_length )) .style( Style::default() .add_modifier(Modifier::BOLD) .fg(Color::White), ); f.render_widget(title, row5[0]); // Helper let helper = Block::default() .borders(Borders::NONE) .title("<- | tab | ->: change batch tab | q / CTRL + c: quit | +/-: zoom") .title_alignment(Alignment::Right) .style(Style::default().fg(Color::White)); f.render_widget(helper, row5[0]); // Batch tabs let titles = self .data .batch_size .iter() .map(|b| { Spans::from(vec![Span::styled( format!("Batch: {b}"), Style::default().fg(Color::White), )]) }) .collect(); let tabs = Tabs::new(titles) .block(Block::default().borders(Borders::ALL).title("Tabs")) .select(self.current_tab) .style(Style::default().fg(Color::LightCyan)) .highlight_style( Style::default() .add_modifier(Modifier::BOLD) .bg(Color::Black), ); f.render_widget(tabs, row5[1]); // Total progress bar let 
color = if self.is_error { Color::Red } else { Color::LightGreen }; let batch_gauge = progress_gauge( "Total Progress", format!("{} / {}", self.completed_batch, self.data.batch_size.len()), batch_progress, color, ); f.render_widget(batch_gauge, top[0]); // Batch progress Bar let color = if self.is_error { Color::Red } else { Color::LightBlue }; let run_gauge = progress_gauge( "Batch Progress", format!( "{} / {}", self.completed_runs[self.current_batch], self.n_run ), run_progress, color, ); f.render_widget(run_gauge, top[1]); // Prefill text infos let prefill_latency_block = latency_paragraph( &mut self.data.prefill_latencies[self.current_tab], "Prefill", ); let prefill_throughput_block = throughput_paragraph(&self.data.prefill_throughputs[self.current_tab], "Prefill"); f.render_widget(prefill_latency_block, prefill_text[0]); f.render_widget(prefill_throughput_block, prefill_text[1]); // Prefill latency histogram let histo_width = 7; let bins = if mid[1].width < 2 { 0 } else { (mid[1].width as usize - 2) / (histo_width + 1) } .max(2); let histo_data = latency_histogram_data(&self.data.prefill_latencies[self.current_tab], bins); let histo_data_str: Vec<(&str, u64)> = histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect(); let prefill_histogram = latency_histogram(&histo_data_str, "Prefill").bar_width(histo_width as u16); f.render_widget(prefill_histogram, mid[1]); // Decode text info let decode_latency_block = latency_paragraph( &mut self.data.decode_latencies[self.current_tab], "Decode Total", ); let decode_token_latency_block = latency_paragraph( &mut self.data.decode_token_latencies[self.current_tab], "Decode Token", ); let decode_throughput_block = throughput_paragraph(&self.data.decode_throughputs[self.current_tab], "Decode"); f.render_widget(decode_latency_block, decode_text_latency[0]); f.render_widget(decode_token_latency_block, decode_text_latency[1]); f.render_widget(decode_throughput_block, decode_text[1]); // Decode latency histogram let histo_data = latency_histogram_data(&self.data.decode_latencies[self.current_tab], bins); let histo_data_str: Vec<(&str, u64)> = histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect(); let decode_histogram = latency_histogram(&histo_data_str, "Decode").bar_width(histo_width as u16); f.render_widget(decode_histogram, mid[3]); // Prefill latency/throughput chart let prefill_latency_throughput_chart = latency_throughput_chart( &self.data.prefill_batch_latency_throughput, &self.data.batch_size, self.zoom, "Prefill", ); f.render_widget(prefill_latency_throughput_chart, bottom[0]); // Decode latency/throughput chart let decode_latency_throughput_chart = latency_throughput_chart( &self.data.decode_batch_latency_throughput, &self.data.batch_size, self.zoom, "Decode", ); f.render_widget(decode_latency_throughput_chart, bottom[1]); } } /// App internal data struct pub(crate) struct Data { pub(crate) batch_size: Vec<u32>, pub(crate) prefill_latencies: Vec<Vec<f64>>, pub(crate) prefill_throughputs: Vec<Vec<f64>>, pub(crate) decode_latencies: Vec<Vec<f64>>, pub(crate) decode_token_latencies: Vec<Vec<f64>>, pub(crate) decode_throughputs: Vec<Vec<f64>>, pub(crate) prefill_batch_latency_throughput: Vec<(f64, f64)>, pub(crate) decode_batch_latency_throughput: Vec<(f64, f64)>, } impl Data { fn new(n_run: usize, batch_size: Vec<u32>) -> Self { let prefill_latencies: Vec<Vec<f64>> = (0..batch_size.len()) .map(|_| Vec::with_capacity(n_run)) .collect(); let prefill_throughputs: Vec<Vec<f64>> = prefill_latencies.clone(); let decode_latencies: Vec<Vec<f64>> = 
prefill_latencies.clone(); let decode_token_latencies: Vec<Vec<f64>> = decode_latencies.clone(); let decode_throughputs: Vec<Vec<f64>> = prefill_throughputs.clone(); let prefill_batch_latency_throughput: Vec<(f64, f64)> = Vec::with_capacity(batch_size.len()); let decode_batch_latency_throughput: Vec<(f64, f64)> = prefill_batch_latency_throughput.clone(); Self { batch_size, prefill_latencies, prefill_throughputs, decode_latencies, decode_token_latencies, decode_throughputs, prefill_batch_latency_throughput, decode_batch_latency_throughput, } } fn push_prefill(&mut self, prefill: Prefill, batch_idx: usize) { let latency = prefill.latency.as_micros() as f64 / 1000.0; self.prefill_latencies[batch_idx].push(latency); self.prefill_throughputs[batch_idx].push(prefill.throughput); } fn push_decode(&mut self, decode: Decode, batch_idx: usize) { let latency = decode.latency.as_micros() as f64 / 1000.0; let token_latency = decode.token_latency.as_micros() as f64 / 1000.0; self.decode_latencies[batch_idx].push(latency); self.decode_token_latencies[batch_idx].push(token_latency); self.decode_throughputs[batch_idx].push(decode.throughput); } fn end_batch(&mut self, batch_idx: usize) { self.prefill_batch_latency_throughput.push(( self.prefill_latencies[batch_idx].iter().sum::<f64>() / self.prefill_latencies[batch_idx].len() as f64, self.prefill_throughputs[batch_idx].iter().sum::<f64>() / self.prefill_throughputs[batch_idx].len() as f64, )); self.decode_batch_latency_throughput.push(( self.decode_latencies[batch_idx].iter().sum::<f64>() / self.decode_latencies[batch_idx].len() as f64, self.decode_throughputs[batch_idx].iter().sum::<f64>() / self.decode_throughputs[batch_idx].len() as f64, )); } } /// Progress bar fn progress_gauge(title: &str, label: String, progress: f64, color: Color) -> Gauge { Gauge::default() .block(Block::default().title(title).borders(Borders::ALL)) .gauge_style(Style::default().fg(color)) .label(Span::raw(label)) .ratio(progress) } /// Throughput paragraph fn throughput_paragraph<'a>(throughput: &Vec<f64>, name: &'static str) -> Paragraph<'a> { // Throughput average/high/low texts let throughput_texts = statis_spans(throughput, "tokens/secs"); // Throughput block Paragraph::new(throughput_texts).block( Block::default() .title(Span::raw(format!("{name} Throughput"))) .borders(Borders::ALL), ) } /// Latency paragraph fn latency_paragraph<'a>(latency: &mut Vec<f64>, name: &'static str) -> Paragraph<'a> { // Latency average/high/low texts let mut latency_texts = statis_spans(latency, "ms"); // Sort latency for percentiles float_ord::sort(latency); let latency_percentiles = crate::utils::percentiles(latency, &[50, 90, 99]); // Latency p50/p90/p99 texts let colors = vec![Color::LightGreen, Color::LightYellow, Color::LightRed]; for (i, (name, value)) in latency_percentiles.iter().enumerate() { let span = Spans::from(vec![Span::styled( format!("{name}: {value:.2} ms"), Style::default().fg(colors[i]), )]); latency_texts.push(span); } Paragraph::new(latency_texts).block( Block::default() .title(Span::raw(format!("{name} Latency"))) .borders(Borders::ALL), ) } /// Average/High/Low spans fn statis_spans<'a>(data: &Vec<f64>, unit: &'static str) -> Vec<Spans<'a>> { vec![ Spans::from(vec![Span::styled( format!( "Average: {:.2} {unit}", data.iter().sum::<f64>() / data.len() as f64 ), Style::default().fg(Color::LightBlue), )]), Spans::from(vec![Span::styled( format!( "Lowest: {:.2} {unit}", data.iter() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN) ), Style::default().fg(Color::Reset), 
)]), Spans::from(vec![Span::styled( format!( "Highest: {:.2} {unit}", data.iter() .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN) ), Style::default().fg(Color::Reset), )]), ] } /// Latency histogram data fn latency_histogram_data(latency: &[f64], bins: usize) -> Vec<(String, u64)> { let histo_data: Vec<(String, u64)> = { let histo = crate::utils::histogram(latency, bins); histo .into_iter() .map(|(label, v)| (format!("{label:.2}"), v as u64)) .collect() }; histo_data } /// Latency Histogram fn latency_histogram<'a>( histo_data_str: &'a Vec<(&'a str, u64)>, name: &'static str, ) -> BarChart<'a> { BarChart::default() .block( Block::default() .title(format!("{name} latency histogram")) .style(Style::default().fg(Color::LightYellow).bg(Color::Reset)) .borders(Borders::ALL), ) .data(histo_data_str.as_slice()) } /// Latency/Throughput chart fn latency_throughput_chart<'a>( latency_throughput: &'a Vec<(f64, f64)>, batch_sizes: &'a [u32], zoom: bool, name: &'static str, ) -> Chart<'a> { let latency_iter = latency_throughput.iter().map(|(l, _)| l); let throughput_iter = latency_throughput.iter().map(|(_, t)| t); // Get extreme values let min_latency: f64 = *latency_iter .clone() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); let max_latency: f64 = *latency_iter .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); let min_throughput: f64 = *throughput_iter .clone() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); let max_throughput: f64 = *throughput_iter .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); // Char min max values let min_x = if zoom { ((min_latency - 0.05 * min_latency) / 100.0).floor() * 100.0 } else { 0.0 }; let max_x = ((max_latency + 0.05 * max_latency) / 100.0).ceil() * 100.0; let step_x = (max_x - min_x) / 4.0; // Chart min max values let min_y = if zoom { ((min_throughput - 0.05 * min_throughput) / 100.0).floor() * 100.0 } else { 0.0 }; let max_y = ((max_throughput + 0.05 * max_throughput) / 100.0).ceil() * 100.0; let step_y = (max_y - min_y) / 4.0; // Labels let mut x_labels = vec![Span::styled( format!("{min_x:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )]; for i in 0..3 { x_labels.push(Span::styled( format!("{:.2}", min_x + ((i + 1) as f64 * step_x)), Style::default().fg(Color::Gray).bg(Color::Reset), )); } x_labels.push(Span::styled( format!("{max_x:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )); // Labels let mut y_labels = vec![Span::styled( format!("{min_y:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )]; for i in 0..3 { y_labels.push(Span::styled( format!("{:.2}", min_y + ((i + 1) as f64 * step_y)), Style::default().fg(Color::Gray).bg(Color::Reset), )); } y_labels.push(Span::styled( format!("{max_y:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )); // Chart dataset let colors = color_vec(); let datasets: Vec<Dataset> = (0..latency_throughput.len()) .map(|i| { let color_idx = i % colors.len(); Dataset::default() .name(batch_sizes[i].to_string()) .marker(symbols::Marker::Block) .style(Style::default().fg(colors[color_idx])) .graph_type(GraphType::Scatter) .data(&latency_throughput[i..(i + 1)]) }) .collect(); // Chart Chart::new(datasets) .style(Style::default().fg(Color::Cyan).bg(Color::Reset)) .block( Block::default() .title(Span::styled( format!("{name} throughput over latency"), Style::default().fg(Color::Gray).bg(Color::Reset), )) 
.borders(Borders::ALL), ) .x_axis( Axis::default() .title("ms") .style(Style::default().fg(Color::Gray).bg(Color::Reset)) .labels(x_labels) .bounds([min_x, max_x]), ) .y_axis( Axis::default() .title("tokens/secs") .style(Style::default().fg(Color::Gray).bg(Color::Reset)) .labels(y_labels) .bounds([min_y, max_y]), ) } // Colors for latency/throughput chart fn color_vec() -> Vec<Color> { vec![ Color::Red, Color::Green, Color::Yellow, Color::Blue, Color::Magenta, Color::Cyan, Color::Gray, Color::DarkGray, Color::LightRed, Color::LightGreen, Color::LightYellow, Color::LightBlue, Color::LightMagenta, Color::LightCyan, ] }
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/event.rs
/// Inspired by https://github.com/orhun/rust-tui-template/blob/472aa515119d4c94903eac12d9784417281dc7f5/src/event.rs
use crossterm::event;
use std::time::{Duration, Instant};
use tokio::sync::{broadcast, mpsc};

/// Events
#[derive(Debug)]
pub(crate) enum Event {
    /// Terminal tick.
    Tick,
    /// Key press.
    Key(event::KeyEvent),
    /// Terminal resize.
    Resize(u16, u16),
}

pub(crate) async fn terminal_event_task(
    fps: u32,
    event_sender: mpsc::Sender<Event>,
    mut shutdown_receiver: broadcast::Receiver<()>,
    _shutdown_guard_sender: mpsc::Sender<()>,
) {
    // End task if a message is received on shutdown_receiver
    // _shutdown_guard_sender will be dropped once the task is finished
    tokio::select! {
        _ = event_loop(fps, event_sender) => {
        },
        _ = shutdown_receiver.recv() => {}
    }
}

/// Main event loop
async fn event_loop(fps: u32, event_sender: mpsc::Sender<Event>) {
    // Frame budget (lib.rs spawns this task with fps = 250, i.e. a 4 ms budget)
    let per_frame = Duration::from_secs(1) / fps;

    // When was last frame executed
    let mut last_frame = Instant::now();

    loop {
        // Sleep to avoid blocking the thread for too long
        if let Some(sleep) = per_frame.checked_sub(last_frame.elapsed()) {
            tokio::time::sleep(sleep).await;
        }

        // Get crossterm event and send a new one over the channel
        if event::poll(Duration::from_secs(0)).expect("no events available") {
            match event::read().expect("unable to read event") {
                event::Event::Key(e) => event_sender.send(Event::Key(e)).await.unwrap_or(()),
                event::Event::Resize(w, h) => {
                    event_sender.send(Event::Resize(w, h)).await.unwrap_or(())
                }
                _ => (),
            }
        }

        // Frame budget exceeded
        if last_frame.elapsed() >= per_frame {
            // Send tick
            event_sender.send(Event::Tick).await.unwrap_or(());
            // Reset last_frame time
            last_frame = Instant::now();
        }
    }
}
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/generation.rs
use std::time::{Duration, Instant}; use text_generation_client::{ Batch, CachedBatch, ClientError, NextTokenChooserParameters, Request, ShardedClient, StoppingCriteriaParameters, }; use tokenizers::{Tokenizer, TruncationDirection}; use tokio::sync::{broadcast, mpsc}; const LOREM_IPSUM: &str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."; #[derive(Debug, Clone)] pub(crate) struct Prefill { pub(crate) latency: Duration, pub(crate) throughput: f64, } #[derive(Debug, Clone)] pub(crate) struct Decode { pub(crate) latency: Duration, pub(crate) token_latency: Duration, pub(crate) throughput: f64, } #[derive(Debug)] pub(crate) enum Message { Warmup, Prefill(Prefill), Decode(Decode), EndRun, EndBatch, } /// Benchmarking task #[allow(clippy::too_many_arguments)] pub(crate) async fn generation_task( tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, n_runs: usize, warmups: usize, parameters: NextTokenChooserParameters, client: ShardedClient, run_sender: mpsc::Sender<Result<Message, ClientError>>, mut shutdown_receiver: broadcast::Receiver<()>, _shutdown_guard_sender: mpsc::Sender<()>, ) { // End task if a message is received on shutdown_receiver // _shutdown_guard_sender will be dropped once the task is finished tokio::select! { res = generate_runs(tokenizer, batch_size, sequence_length, decode_length, n_runs, warmups, parameters, client, run_sender.clone()) => { if let Err(err) = res { run_sender.send(Err(err)).await.unwrap_or(()); } }, _ = shutdown_receiver.recv() => {} } } /// Benchmark prefill/decode #[allow(clippy::too_many_arguments)] async fn generate_runs( tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, n_runs: usize, warmups: usize, parameters: NextTokenChooserParameters, mut client: ShardedClient, run_sender: mpsc::Sender<Result<Message, ClientError>>, ) -> Result<(), ClientError> { // Create a dummy sequence let sequence = create_sequence(sequence_length, tokenizer); for b in batch_size { // Warmups on batch size for _ in 0..warmups { let (_, decode_batch) = prefill( sequence.clone(), sequence_length, b, decode_length, parameters.clone(), &mut client, ) .await?; let _ = decode(decode_batch, &mut client).await?; // Send warmup message run_sender.send(Ok(Message::Warmup)).await.unwrap_or(()); } for _ in 0..n_runs { let (prefill, decode_batch) = prefill( sequence.clone(), sequence_length, b, decode_length, parameters.clone(), &mut client, ) .await?; // Send prefill message run_sender .send(Ok(Message::Prefill(prefill))) .await .unwrap_or(()); let decode = decode(decode_batch, &mut client).await?; // Send decode message run_sender .send(Ok(Message::Decode(decode))) .await .unwrap_or(()); // Send run ended message run_sender.send(Ok(Message::EndRun)).await.unwrap_or(()); } // Batch ended run_sender.send(Ok(Message::EndBatch)).await.unwrap_or(()); } Ok(()) } // Run a prefill step async fn prefill( sequence: String, sequence_length: u32, batch_size: u32, decode_length: u32, parameters: NextTokenChooserParameters, client: &mut ShardedClient, ) -> Result<(Prefill, CachedBatch), ClientError> { // Create requests let requests = 
(0..batch_size) .map(|id| Request { id: id.into(), prefill_logprobs: false, inputs: sequence.clone(), truncate: sequence_length, parameters: Some(parameters.clone()), stopping_parameters: Some(StoppingCriteriaParameters { max_new_tokens: decode_length, stop_sequences: vec![], ignore_eos_token: true, // Will not stop even if an eos token is generated }), }) .collect(); let batch = Batch { id: 0, requests, size: batch_size, max_tokens: batch_size * (sequence_length + decode_length), }; // Run prefill let start_time = Instant::now(); let (_, decode_batch) = client.prefill(batch.clone()).await?; // Get latency let latency = start_time.elapsed(); // Compute throughput from latency and batch size let throughput = batch_size as f64 / latency.as_secs_f64(); // Decode batch cannot be empty let decode_batch = decode_batch.expect("decode_batch is None. This is a bug."); let step = Prefill { latency, throughput, }; Ok((step, decode_batch)) } /// Run a full decode async fn decode(batch: CachedBatch, client: &mut ShardedClient) -> Result<Decode, ClientError> { let mut decode_length = 0; let batch_size = batch.size; let start_time = Instant::now(); // Full decode over decode length let mut next_batch = Some(batch); while let Some(batch) = next_batch { let result = client.decode(vec![batch]).await?; next_batch = result.1; decode_length += 1; } // Get latency let latency = start_time.elapsed(); let token_latency = latency / decode_length; // Compute throughput from latency, batch size and decode length let throughput = (batch_size * decode_length) as f64 / latency.as_secs_f64(); let step = Decode { latency, token_latency, throughput, }; Ok(step) } /// Create a dummy sequence of the correct length fn create_sequence(sequence_length: u32, tokenizer: Tokenizer) -> String { let lorem_ipsum_length = tokenizer.encode(LOREM_IPSUM, true).unwrap().len(); // Repeat lorem ipsum to cover sequence length let string_sequence = LOREM_IPSUM.repeat((0..sequence_length).step_by(lorem_ipsum_length).len()); // Encode sequence let mut encoding = tokenizer.encode(string_sequence, true).unwrap(); // Truncate to sequence_length encoding.truncate(sequence_length as usize, 0, TruncationDirection::Left); // Decode tokenizer .decode(Vec::from(encoding.get_ids()), false) .unwrap() }
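// A hedged, self-contained sketch (added for illustration; not part of the
// original file) of the throughput arithmetic used by `prefill` and `decode`
// above. The numbers are made up purely to make the formulas concrete.
#[cfg(test)]
mod throughput_sketch {
    use std::time::Duration;

    #[test]
    fn worked_example() {
        let batch_size: u32 = 8;
        let decode_length: u32 = 8;

        // Prefill: a single forward pass over the whole batch, so throughput
        // is batch_size / latency (see `prefill` above).
        let prefill_latency = Duration::from_millis(50);
        let prefill_throughput = batch_size as f64 / prefill_latency.as_secs_f64();
        assert!((prefill_throughput - 160.0).abs() < 1e-6);

        // Decode: `decode_length` steps, each generating one token per
        // request, so throughput is (batch_size * decode_length) / latency
        // and per-token latency is latency / decode_length (see `decode`).
        let decode_latency = Duration::from_millis(400);
        assert_eq!(decode_latency / decode_length, Duration::from_millis(50));
        let decode_throughput =
            (batch_size * decode_length) as f64 / decode_latency.as_secs_f64();
        assert!((decode_throughput - 160.0).abs() < 1e-6);
    }
}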
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/lib.rs
mod app; mod event; mod generation; mod table; mod utils; use crate::app::App; use crate::event::Event; use crossterm::ExecutableCommand; use std::io; use text_generation_client::{NextTokenChooserParameters, ShardedClient}; use tokenizers::Tokenizer; use tokio::sync::{broadcast, mpsc}; use tui::backend::CrosstermBackend; use tui::Terminal; /// Run benchmarking app #[allow(clippy::too_many_arguments)] pub async fn run( tokenizer_name: String, tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, n_runs: usize, warmups: usize, temperature: Option<f32>, top_k: Option<u32>, top_p: Option<f32>, typical_p: Option<f32>, repetition_penalty: Option<f32>, watermark: bool, do_sample: bool, client: ShardedClient, ) -> Result<(), crossterm::ErrorKind> { let parameters = NextTokenChooserParameters { temperature: temperature.unwrap_or(1.0), top_k: top_k.unwrap_or(0), top_p: top_p.unwrap_or(1.0), typical_p: typical_p.unwrap_or(1.0), do_sample, seed: 0, repetition_penalty: repetition_penalty.unwrap_or(1.0), watermark, }; // Initialize terminal properties crossterm::terminal::enable_raw_mode()?; io::stdout().execute(crossterm::terminal::EnterAlternateScreen)?; io::stdout().execute(crossterm::cursor::Hide)?; // Initialize terminal let mut terminal = { let backend = CrosstermBackend::new(io::stdout()); Terminal::new(backend)? }; // Create message channel between generation_task and app let (run_sender, run_receiver) = mpsc::channel(8); // Crossterm event channel let (event_sender, mut event_receiver) = mpsc::channel(8); // Shutdown channel to terminate tasks let (shutdown_sender, _) = broadcast::channel(1); // Channel to check if tasks terminated let (shutdown_guard_sender, mut shutdown_guard_receiver) = mpsc::channel(1); // Create generation task tokio::spawn(generation::generation_task( tokenizer, batch_size.clone(), sequence_length, decode_length, n_runs, warmups, parameters, client, run_sender, shutdown_sender.subscribe(), shutdown_guard_sender.clone(), )); // Create event task tokio::spawn(event::terminal_event_task( 250, event_sender, shutdown_sender.subscribe(), shutdown_guard_sender.clone(), )); // Drop our end of shutdown sender drop(shutdown_guard_sender); // Create App let mut app = App::new( run_receiver, tokenizer_name.clone(), sequence_length, decode_length, n_runs, batch_size, ); while app.running { // Draw frame terminal.draw(|frame| app.render(frame))?; // Await a new event from event handling task match event_receiver.recv().await { None => break, // Update app state Some(event) => match event { Event::Tick => app.tick(), Event::Key(key_event) => app.handle_key_event(key_event), _ => {} }, } } // Ask tasks to shutdown let _ = shutdown_sender.send(()); // Wait for tasks to shutdown let _ = shutdown_guard_receiver.recv().await; // Revert terminal to original view io::stdout().execute(crossterm::terminal::LeaveAlternateScreen)?; crossterm::terminal::disable_raw_mode()?; io::stdout().execute(crossterm::cursor::Show)?; let parameters_table = table::parameters_table( tokenizer_name, sequence_length, decode_length, n_runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, watermark, do_sample, ); println!("\n{parameters_table}\n"); let latency_table = table::latency_table(&app.data); println!("\n{latency_table}\n"); let throughput_table = table::throughput_table(&app.data); println!("\n{throughput_table}\n"); Ok(()) }
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/main.rs
/// Text Generation Inference benchmarking tool /// /// Inspired by the great Oha app: https://github.com/hatoo/oha /// and: https://github.com/orhun/rust-tui-template use clap::Parser; use std::path::Path; use text_generation_client::ShardedClient; use tokenizers::{FromPretrainedParameters, Tokenizer}; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; /// App Configuration #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Args { /// The name of the tokenizer (as in model_id on the huggingface hub, or local path). #[clap(short, long, env)] tokenizer_name: String, /// The revision to use for the tokenizer if on the hub. #[clap(default_value = "main", long, env)] revision: String, /// The various batch sizes to benchmark for. The idea is to get enough batching to start seeing increased latency; this usually means you're moving from memory bound (usually the case at BS=1) to compute bound, and this is a sweet spot for the maximum batch size for the model under test #[clap(short, long)] batch_size: Option<Vec<u32>>, /// This is the length, in tokens, of the initial prompt sent to the text-generation-server. A longer prompt will slow down the benchmark. Usually the latency grows somewhat linearly with this for the prefill step. /// /// Most importantly, the prefill step is usually not the one dominating your runtime, so it's ok to keep it short. #[clap(default_value = "10", short, long, env)] sequence_length: u32, /// This is how many tokens will be generated by the server and averaged out to give the `decode` latency. This is the *critical* number you want to optimize, as LLMs spend most of their time decoding. /// /// Decode latency is usually quite stable. #[clap(default_value = "8", short, long, env)] decode_length: u32, /// How many runs should we average from #[clap(default_value = "10", short, long, env)] runs: usize, /// Number of warmup cycles #[clap(default_value = "1", short, long, env)] warmups: usize, /// The location of the grpc socket.
This benchmark tool bypasses the router /// completely and directly talks to the gRPC processes #[clap(default_value = "/tmp/text-generation-server-0", short, long, env)] master_shard_uds_path: String, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] temperature: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] top_k: Option<u32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] top_p: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] typical_p: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] repetition_penalty: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] watermark: bool, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] do_sample: bool, } fn main() -> Result<(), Box<dyn std::error::Error>> { init_logging(); // Get args let args = Args::parse(); // Pattern match configuration let Args { tokenizer_name, revision, batch_size, sequence_length, decode_length, runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, watermark, do_sample, master_shard_uds_path, } = args; let batch_size = batch_size.unwrap_or(vec![1, 2, 4, 8, 16, 32]); // Tokenizer instance // This will only be used to validate payloads tracing::info!("Loading tokenizer"); let local_path = Path::new(&tokenizer_name); let tokenizer = if local_path.exists() && local_path.is_dir() && local_path.join("tokenizer.json").exists() { // Load local tokenizer tracing::info!("Found local tokenizer"); Tokenizer::from_file(local_path.join("tokenizer.json")).unwrap() } else { tracing::info!("Downloading tokenizer"); // Parse Huggingface hub token let auth_token = std::env::var("HUGGING_FACE_HUB_TOKEN").ok(); // Download and instantiate tokenizer // We need to download it outside of the Tokio runtime let params = FromPretrainedParameters { revision, auth_token, ..Default::default() }; Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).unwrap() }; tracing::info!("Tokenizer loaded"); // Launch Tokio runtime tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap() .block_on(async { // Instantiate sharded client from the master unix socket tracing::info!("Connect to model server"); let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path) .await .expect("Could not connect to server"); // Clear the cache; useful if the webserver rebooted sharded_client .clear_cache(None) .await .expect("Unable to clear cache"); tracing::info!("Connected"); // Run app text_generation_benchmark::run( tokenizer_name, tokenizer, batch_size, sequence_length, decode_length, runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, watermark, do_sample, sharded_client, ) 
.await .unwrap(); }); Ok(()) } /// Init logging using LOG_LEVEL fn init_logging() { // STDOUT/STDERR layer let fmt_layer = tracing_subscriber::fmt::layer() .with_file(true) .with_line_number(true); // Filter events with LOG_LEVEL let env_filter = EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info")); tracing_subscriber::registry() .with(env_filter) .with(fmt_layer) .init(); }
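// Hedged usage note (added for illustration; not part of the original file):
// `init_logging` above filters events with the LOG_LEVEL environment variable
// and falls back to "info", so for example
//     LOG_LEVEL=debug text-generation-benchmark --tokenizer-name bigscience/bloom-560m
// enables debug-level output.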
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/table.rs
use crate::app::Data; use tabled::settings::Merge; use tabled::{builder::Builder, settings::Style, Table}; #[allow(clippy::too_many_arguments)] pub(crate) fn parameters_table( tokenizer_name: String, sequence_length: u32, decode_length: u32, n_runs: usize, warmups: usize, temperature: Option<f32>, top_k: Option<u32>, top_p: Option<f32>, typical_p: Option<f32>, repetition_penalty: Option<f32>, watermark: bool, do_sample: bool, ) -> Table { let mut builder = Builder::default(); builder.set_header(["Parameter", "Value"]); builder.push_record(["Model", &tokenizer_name]); builder.push_record(["Sequence Length", &sequence_length.to_string()]); builder.push_record(["Decode Length", &decode_length.to_string()]); builder.push_record(["N Runs", &n_runs.to_string()]); builder.push_record(["Warmups", &warmups.to_string()]); builder.push_record(["Temperature", &format!("{temperature:?}")]); builder.push_record(["Top K", &format!("{top_k:?}")]); builder.push_record(["Top P", &format!("{top_p:?}")]); builder.push_record(["Typical P", &format!("{typical_p:?}")]); builder.push_record(["Repetition Penalty", &format!("{repetition_penalty:?}")]); builder.push_record(["Watermark", &watermark.to_string()]); builder.push_record(["Do Sample", &do_sample.to_string()]); let mut table = builder.build(); table.with(Style::markdown()); table } pub(crate) fn latency_table(data: &Data) -> Table { let mut builder = Builder::default(); builder.set_header([ "Step", "Batch Size", "Average", "Lowest", "Highest", "p50", "p90", "p99", ]); add_latencies( &mut builder, "Prefill", &data.batch_size, &data.prefill_latencies, ); add_latencies( &mut builder, "Decode (token)", &data.batch_size, &data.decode_token_latencies, ); add_latencies( &mut builder, "Decode (total)", &data.batch_size, &data.decode_latencies, ); let mut table = builder.build(); table.with(Style::markdown()).with(Merge::vertical()); table } pub(crate) fn throughput_table(data: &Data) -> Table { let mut builder = Builder::default(); builder.set_header(["Step", "Batch Size", "Average", "Lowest", "Highest"]); add_throuhgputs( &mut builder, "Prefill", &data.batch_size, &data.prefill_throughputs, ); add_throuhgputs( &mut builder, "Decode", &data.batch_size, &data.decode_throughputs, ); let mut table = builder.build(); table.with(Style::markdown()).with(Merge::vertical()); table } fn add_latencies( builder: &mut Builder, step: &'static str, batch_size: &[u32], batch_latencies: &[Vec<f64>], ) { for (i, b) in batch_size.iter().enumerate() { let latencies = &batch_latencies[i]; let (avg, min, max) = avg_min_max(latencies); let row = [ step, &b.to_string(), &format_value(avg, "ms"), &format_value(min, "ms"), &format_value(max, "ms"), &format_value(px(latencies, 50), "ms"), &format_value(px(latencies, 90), "ms"), &format_value(px(latencies, 99), "ms"), ]; builder.push_record(row); } } fn add_throuhgputs( builder: &mut Builder, step: &'static str, batch_size: &[u32], batch_throughputs: &[Vec<f64>], ) { for (i, b) in batch_size.iter().enumerate() { let throughputs = &batch_throughputs[i]; let (avg, min, max) = avg_min_max(throughputs); let row = [ step, &b.to_string(), &format_value(avg, "tokens/secs"), &format_value(min, "tokens/secs"), &format_value(max, "tokens/secs"), ]; builder.push_record(row); } } fn avg_min_max(data: &Vec<f64>) -> (f64, f64, f64) { let average = data.iter().sum::<f64>() / data.len() as f64; let min = data .iter() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); let max = data .iter() .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); 
(average, *min, *max) } fn px(data: &Vec<f64>, p: u32) -> f64 { let i = (f64::from(p) / 100.0 * data.len() as f64) as usize; *data.get(i).unwrap_or(&std::f64::NAN) } fn format_value(value: f64, unit: &'static str) -> String { format!("{:.2} {unit}", value) }
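// A hedged test sketch (added for illustration; not part of the original
// file). Note that `px` indexes directly into the slice, so it assumes the
// values are already sorted in ascending order, which holds for the latency
// vectors because the app sorts them with float_ord before rendering.
#[cfg(test)]
mod table_sketch {
    use super::{avg_min_max, format_value, px};

    #[test]
    fn percentile_and_formatting() {
        let sorted = vec![10.0, 20.0, 30.0, 40.0];
        // index = (50 / 100) * 4 = 2 -> third element
        assert_eq!(px(&sorted, 50), 30.0);
        // index = (99 / 100) * 4 = 3.96, truncated to 3 -> last element
        assert_eq!(px(&sorted, 99), 40.0);

        let (avg, min, max) = avg_min_max(&sorted);
        assert_eq!((avg, min, max), (25.0, 10.0, 40.0));

        assert_eq!(format_value(12.5, "ms"), "12.50 ms");
    }
}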
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/utils.rs
/// MIT License
//
// Copyright (c) 2020 hatoo
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
use std::collections::BTreeMap;

pub(crate) fn histogram(values: &[f64], bins: usize) -> Vec<(f64, usize)> {
    assert!(bins >= 2);
    let mut bucket: Vec<usize> = vec![0; bins];
    let min = values.iter().collect::<average::Min>().min();
    let max = values.iter().collect::<average::Max>().max();
    let step = (max - min) / (bins - 1) as f64;

    for &v in values {
        let i = std::cmp::min(((v - min) / step).ceil() as usize, bins - 1);
        bucket[i] += 1;
    }

    bucket
        .into_iter()
        .enumerate()
        .map(|(i, v)| (min + step * i as f64, v))
        .collect()
}

pub(crate) fn percentiles(values: &[f64], percents: &[i32]) -> BTreeMap<String, f64> {
    percents
        .iter()
        .map(|&p| {
            let i = (f64::from(p) / 100.0 * values.len() as f64) as usize;
            (format!("p{p}"), *values.get(i).unwrap_or(&std::f64::NAN))
        })
        .collect()
}
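// A hedged test sketch (added for illustration; not part of the original
// file). `percentiles` indexes directly into the slice, so callers are
// expected to pass sorted values (the app sorts with float_ord::sort first).
#[cfg(test)]
mod utils_sketch {
    use super::{histogram, percentiles};

    #[test]
    fn histogram_and_percentiles() {
        let values = vec![10.0, 11.0, 12.0, 13.0, 30.0];

        // With bins = 2: step = (max - min) / (bins - 1) = 20.0; the minimum
        // falls in the first bucket and every other value rounds up into the
        // second because of `ceil()`.
        let histo = histogram(&values, 2);
        assert_eq!(histo, vec![(10.0, 1), (30.0, 4)]);

        // p50 -> index (0.50 * 5) as usize = 2, p90 -> index 4.
        let p = percentiles(&values, &[50, 90]);
        assert_eq!(p["p50"], 12.0);
        assert_eq!(p["p90"], 30.0);
    }
}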
0
hf_public_repos/text-generation-inference/clients
hf_public_repos/text-generation-inference/clients/python/Makefile
unit-tests:
	python -m pytest --cov=text_generation tests

install:
	pip install pip --upgrade
	pip install -e .
0
hf_public_repos/text-generation-inference/clients
hf_public_repos/text-generation-inference/clients/python/README.md
# Text Generation

The Hugging Face Text Generation Python library provides a convenient way of interfacing with a
`text-generation-inference` instance running on
[Hugging Face Inference Endpoints](https://huggingface.co/inference-endpoints) or on the Hugging Face Hub.

## Get Started

### Install

```shell
pip install text-generation
```

### Inference API Usage

```python
from text_generation import InferenceAPIClient

client = InferenceAPIClient("bigscience/bloomz")
text = client.generate("Why is the sky blue?").generated_text
print(text)
# ' Rayleigh scattering'

# Token Streaming
text = ""
for response in client.generate_stream("Why is the sky blue?"):
    if not response.token.special:
        text += response.token.text

print(text)
# ' Rayleigh scattering'
```

or with the asynchronous client:

```python
from text_generation import InferenceAPIAsyncClient

client = InferenceAPIAsyncClient("bigscience/bloomz")
response = await client.generate("Why is the sky blue?")
print(response.generated_text)
# ' Rayleigh scattering'

# Token Streaming
text = ""
async for response in client.generate_stream("Why is the sky blue?"):
    if not response.token.special:
        text += response.token.text

print(text)
# ' Rayleigh scattering'
```

Check all currently deployed models on the Hugging Face Inference API with `Text Generation` support:

```python
from text_generation.inference_api import deployed_models

print(deployed_models())
```

### Hugging Face Inference Endpoint usage

```python
from text_generation import Client

endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud"

client = Client(endpoint_url)
text = client.generate("Why is the sky blue?").generated_text
print(text)
# ' Rayleigh scattering'

# Token Streaming
text = ""
for response in client.generate_stream("Why is the sky blue?"):
    if not response.token.special:
        text += response.token.text

print(text)
# ' Rayleigh scattering'
```

or with the asynchronous client:

```python
from text_generation import AsyncClient

endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud"

client = AsyncClient(endpoint_url)
response = await client.generate("Why is the sky blue?")
print(response.generated_text)
# ' Rayleigh scattering'

# Token Streaming
text = ""
async for response in client.generate_stream("Why is the sky blue?"):
    if not response.token.special:
        text += response.token.text

print(text)
# ' Rayleigh scattering'
```

### Types

```python
# Request Parameters
class Parameters:
    # Activate logits sampling
    do_sample: bool
    # Maximum number of generated tokens
    max_new_tokens: int
    # The parameter for repetition penalty. 1.0 means no penalty.
    # See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
    repetition_penalty: Optional[float]
    # Whether to prepend the prompt to the generated text
    return_full_text: bool
    # Stop generating tokens if a member of `stop_sequences` is generated
    stop: List[str]
    # Random sampling seed
    seed: Optional[int]
    # The value used to modulate the logits distribution.
    temperature: Optional[float]
    # The number of highest probability vocabulary tokens to keep for top-k-filtering.
    top_k: Optional[int]
    # If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
    # higher are kept for generation.
    top_p: Optional[float]
    # truncate inputs tokens to the given size
    truncate: Optional[int]
    # Typical Decoding mass
    # See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
    typical_p: Optional[float]
    # Generate best_of sequences and return the one with the highest token logprobs
    best_of: Optional[int]
    # Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
    watermark: bool
    # Get decoder input token logprobs and ids
    decoder_input_details: bool

# Decoder input tokens
class InputToken:
    # Token ID from the model tokenizer
    id: int
    # Token text
    text: str
    # Logprob
    # Optional since the logprob of the first token cannot be computed
    logprob: Optional[float]

# Generated tokens
class Token:
    # Token ID from the model tokenizer
    id: int
    # Token text
    text: str
    # Logprob
    logprob: float
    # Is the token a special token
    # Can be used to ignore tokens when concatenating
    special: bool

# Generation finish reason
class FinishReason(Enum):
    # number of generated tokens == `max_new_tokens`
    Length = "length"
    # the model generated its end of sequence token
    EndOfSequenceToken = "eos_token"
    # the model generated a text included in `stop_sequences`
    StopSequence = "stop_sequence"

# Additional sequences when using the `best_of` parameter
class BestOfSequence:
    # Generated text
    generated_text: str
    # Generation finish reason
    finish_reason: FinishReason
    # Number of generated tokens
    generated_tokens: int
    # Sampling seed if sampling was activated
    seed: Optional[int]
    # Decoder input tokens, empty if decoder_input_details is False
    prefill: List[InputToken]
    # Generated tokens
    tokens: List[Token]

# `generate` details
class Details:
    # Generation finish reason
    finish_reason: FinishReason
    # Number of generated tokens
    generated_tokens: int
    # Sampling seed if sampling was activated
    seed: Optional[int]
    # Decoder input tokens, empty if decoder_input_details is False
    prefill: List[InputToken]
    # Generated tokens
    tokens: List[Token]
    # Additional sequences when using the `best_of` parameter
    best_of_sequences: Optional[List[BestOfSequence]]

# `generate` return value
class Response:
    # Generated text
    generated_text: str
    # Generation details
    details: Details

# `generate_stream` details
class StreamDetails:
    # Generation finish reason
    finish_reason: FinishReason
    # Number of generated tokens
    generated_tokens: int
    # Sampling seed if sampling was activated
    seed: Optional[int]

# `generate_stream` return value
class StreamResponse:
    # Generated token
    token: Token
    # Complete generated text
    # Only available when the generation is finished
    generated_text: Optional[str]
    # Generation details
    # Only available when the generation is finished
    details: Optional[StreamDetails]

# Inference API currently deployed model
class DeployedModel:
    model_id: str
    sha: str
```
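As a hedged end-to-end sketch tying the types above to the client (the endpoint URL is a placeholder and the keyword
arguments mirror the `Parameters` fields; check `help(Client.generate)` for the exact signature of your installed
version):

```python
from text_generation import Client

client = Client("https://YOUR_ENDPOINT.endpoints.huggingface.cloud")

# Sample with a non-default decoding strategy and inspect the details
response = client.generate(
    "Why is the sky blue?",
    max_new_tokens=20,
    do_sample=True,
    temperature=0.7,
    top_k=50,
)
print(response.generated_text)
print(response.details.finish_reason, response.details.generated_tokens)
```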
0
hf_public_repos/text-generation-inference/clients
hf_public_repos/text-generation-inference/clients/python/poetry.lock
[[package]] name = "aiohttp" version = "3.8.4" description = "Async http client/server framework (asyncio)" category = "main" optional = false python-versions = ">=3.6" [package.dependencies] aiosignal = ">=1.1.2" async-timeout = ">=4.0.0a3,<5.0" asynctest = {version = "0.13.0", markers = "python_version < \"3.8\""} attrs = ">=17.3.0" charset-normalizer = ">=2.0,<4.0" frozenlist = ">=1.1.1" multidict = ">=4.5,<7.0" typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} yarl = ">=1.0,<2.0" [package.extras] speedups = ["Brotli", "aiodns", "cchardet"] [[package]] name = "aiosignal" version = "1.3.1" description = "aiosignal: a list of registered asynchronous callbacks" category = "main" optional = false python-versions = ">=3.7" [package.dependencies] frozenlist = ">=1.1.0" [[package]] name = "async-timeout" version = "4.0.2" description = "Timeout context manager for asyncio programs" category = "main" optional = false python-versions = ">=3.6" [package.dependencies] typing-extensions = {version = ">=3.6.5", markers = "python_version < \"3.8\""} [[package]] name = "asynctest" version = "0.13.0" description = "Enhance the standard unittest package with features for testing asyncio libraries" category = "main" optional = false python-versions = ">=3.5" [[package]] name = "atomicwrites" version = "1.4.1" description = "Atomic file writes." category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "attrs" version = "22.2.0" description = "Classes Without Boilerplate" category = "main" optional = false python-versions = ">=3.6" [package.extras] cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] dev = ["attrs[docs,tests]"] docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"] tests = ["attrs[tests-no-zope]", "zope.interface"] tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=0.971,<0.990)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] tests_no_zope = ["cloudpickle", "hypothesis", "mypy (>=0.971,<0.990)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] [[package]] name = "certifi" version = "2022.12.7" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false python-versions = ">=3.6" [[package]] name = "charset-normalizer" version = "3.1.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." category = "main" optional = false python-versions = ">=3.7.0" [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" [[package]] name = "coverage" version = "7.2.1" description = "Code coverage measurement for Python" category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] toml = ["tomli"] [[package]] name = "filelock" version = "3.10.0" description = "A platform independent file lock." 
category = "main" optional = false python-versions = ">=3.7" [package.extras] docs = ["furo (>=2022.12.7)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.2.1)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "pytest-timeout (>=2.1)"] [[package]] name = "frozenlist" version = "1.3.3" description = "A list-like structure which implements collections.abc.MutableSequence" category = "main" optional = false python-versions = ">=3.7" [[package]] name = "huggingface-hub" version = "0.13.2" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" category = "main" optional = false python-versions = ">=3.7.0" [package.dependencies] filelock = "*" importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} packaging = ">=20.9" pyyaml = ">=5.1" requests = "*" tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] cli = ["InquirerPy (==0.3.4)"] dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"] tensorflow = ["graphviz", "pydot", "tensorflow"] testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"] torch = ["torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] [[package]] name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" category = "main" optional = false python-versions = ">=3.5" [[package]] name = "importlib-metadata" version = "6.0.0" description = "Read metadata from Python packages" category = "main" optional = false python-versions = ">=3.7" [package.dependencies] typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} zipp = ">=0.5" [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] [[package]] name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" category = "dev" optional = false python-versions = ">=3.7" [[package]] name = "multidict" version = "6.0.4" description = "multidict implementation" category = "main" optional = false python-versions = ">=3.7" [[package]] name = "packaging" version = "23.0" description = "Core utilities for Python packages" category = "main" optional = false python-versions = ">=3.7" [[package]] name = "pluggy" version = "1.0.0" description = "plugin and hook calling mechanisms for python" category = "dev" optional = false python-versions = ">=3.6" 
[package.dependencies] importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] [[package]] name = "py" version = "1.11.0" description = "library with cross-python path, ini-parsing, io, code, log facilities" category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "pydantic" version = "1.10.6" description = "Data validation and settings management using python type hints" category = "main" optional = false python-versions = ">=3.7" [package.dependencies] typing-extensions = ">=4.2.0" [package.extras] dotenv = ["python-dotenv (>=0.10.4)"] email = ["email-validator (>=1.0.3)"] [[package]] name = "pytest" version = "6.2.5" description = "pytest: simple powerful testing with Python" category = "dev" optional = false python-versions = ">=3.6" [package.dependencies] atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" py = ">=1.8.2" toml = "*" [package.extras] testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] [[package]] name = "pytest-asyncio" version = "0.17.2" description = "Pytest support for asyncio" category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] pytest = ">=6.1.0" typing-extensions = {version = ">=4.0", markers = "python_version < \"3.8\""} [package.extras] testing = ["coverage (==6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (==0.931)"] [[package]] name = "pytest-cov" version = "3.0.0" description = "Pytest plugin for measuring coverage." category = "dev" optional = false python-versions = ">=3.6" [package.dependencies] coverage = {version = ">=5.2.1", extras = ["toml"]} pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] [[package]] name = "PyYAML" version = "6.0" description = "YAML parser and emitter for Python" category = "main" optional = false python-versions = ">=3.6" [[package]] name = "requests" version = "2.28.2" description = "Python HTTP for Humans." 
category = "main" optional = false python-versions = ">=3.7, <4" [package.dependencies] certifi = ">=2017.4.17" charset-normalizer = ">=2,<4" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<1.27" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "toml" version = "0.10.2" description = "Python Library for Tom's Obvious, Minimal Language" category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" category = "dev" optional = false python-versions = ">=3.7" [[package]] name = "tqdm" version = "4.65.0" description = "Fast, Extensible Progress Meter" category = "main" optional = false python-versions = ">=3.7" [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} [package.extras] dev = ["py-make (>=0.1.0)", "twine", "wheel"] notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] [[package]] name = "typing-extensions" version = "4.5.0" description = "Backported and Experimental Type Hints for Python 3.7+" category = "main" optional = false python-versions = ">=3.7" [[package]] name = "urllib3" version = "1.26.15" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "yarl" version = "1.8.2" description = "Yet another URL library" category = "main" optional = false python-versions = ">=3.7" [package.dependencies] idna = ">=2.0" multidict = ">=4.0" typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} [[package]] name = "zipp" version = "3.15.0" description = "Backport of pathlib-compatible object wrapper for zip files" category = "main" optional = false python-versions = ">=3.7" [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [metadata] lock-version = "1.1" python-versions = "^3.7" content-hash = "0db2f97d52c557dd7f90c55b4ad5bbe308c957c5f7f99fec53c57e0a13822cb4" [metadata.files] aiohttp = [ {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5ce45967538fb747370308d3145aa68a074bdecb4f3a300869590f725ced69c1"}, {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b744c33b6f14ca26b7544e8d8aadff6b765a80ad6164fb1a430bbadd593dfb1a"}, {file = "aiohttp-3.8.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a45865451439eb320784918617ba54b7a377e3501fb70402ab84d38c2cd891b"}, {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d42d7cba1cec432d47ab13b6637bee393a10f664c425ea7b305d1301ca1a3"}, {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee3c36df21b5714d49fc4580247947aa64bcbe2939d1b77b4c8dcb8f6c9faecc"}, {file = 
"aiohttp-3.8.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:176a64b24c0935869d5bbc4c96e82f89f643bcdf08ec947701b9dbb3c956b7dd"}, {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c844fd628851c0bc309f3c801b3a3d58ce430b2ce5b359cd918a5a76d0b20cb5"}, {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5393fb786a9e23e4799fec788e7e735de18052f83682ce2dfcabaf1c00c2c08e"}, {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e4b09863aae0dc965c3ef36500d891a3ff495a2ea9ae9171e4519963c12ceefd"}, {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:adfbc22e87365a6e564c804c58fc44ff7727deea782d175c33602737b7feadb6"}, {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:147ae376f14b55f4f3c2b118b95be50a369b89b38a971e80a17c3fd623f280c9"}, {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:eafb3e874816ebe2a92f5e155f17260034c8c341dad1df25672fb710627c6949"}, {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6cc15d58053c76eacac5fa9152d7d84b8d67b3fde92709195cb984cfb3475ea"}, {file = "aiohttp-3.8.4-cp310-cp310-win32.whl", hash = "sha256:59f029a5f6e2d679296db7bee982bb3d20c088e52a2977e3175faf31d6fb75d1"}, {file = "aiohttp-3.8.4-cp310-cp310-win_amd64.whl", hash = "sha256:fe7ba4a51f33ab275515f66b0a236bcde4fb5561498fe8f898d4e549b2e4509f"}, {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d8ef1a630519a26d6760bc695842579cb09e373c5f227a21b67dc3eb16cfea4"}, {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b3f2e06a512e94722886c0827bee9807c86a9f698fac6b3aee841fab49bbfb4"}, {file = "aiohttp-3.8.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a80464982d41b1fbfe3154e440ba4904b71c1a53e9cd584098cd41efdb188ef"}, {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b631e26df63e52f7cce0cce6507b7a7f1bc9b0c501fcde69742130b32e8782f"}, {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f43255086fe25e36fd5ed8f2ee47477408a73ef00e804cb2b5cba4bf2ac7f5e"}, {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4d347a172f866cd1d93126d9b239fcbe682acb39b48ee0873c73c933dd23bd0f"}, {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3fec6a4cb5551721cdd70473eb009d90935b4063acc5f40905d40ecfea23e05"}, {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80a37fe8f7c1e6ce8f2d9c411676e4bc633a8462844e38f46156d07a7d401654"}, {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d1e6a862b76f34395a985b3cd39a0d949ca80a70b6ebdea37d3ab39ceea6698a"}, {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cd468460eefef601ece4428d3cf4562459157c0f6523db89365202c31b6daebb"}, {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:618c901dd3aad4ace71dfa0f5e82e88b46ef57e3239fc7027773cb6d4ed53531"}, {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:652b1bff4f15f6287550b4670546a2947f2a4575b6c6dff7760eafb22eacbf0b"}, {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80575ba9377c5171407a06d0196b2310b679dc752d02a1fcaa2bc20b235dbf24"}, {file = 
"aiohttp-3.8.4-cp311-cp311-win32.whl", hash = "sha256:bbcf1a76cf6f6dacf2c7f4d2ebd411438c275faa1dc0c68e46eb84eebd05dd7d"}, {file = "aiohttp-3.8.4-cp311-cp311-win_amd64.whl", hash = "sha256:6e74dd54f7239fcffe07913ff8b964e28b712f09846e20de78676ce2a3dc0bfc"}, {file = "aiohttp-3.8.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:880e15bb6dad90549b43f796b391cfffd7af373f4646784795e20d92606b7a51"}, {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb96fa6b56bb536c42d6a4a87dfca570ff8e52de2d63cabebfd6fb67049c34b6"}, {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a6cadebe132e90cefa77e45f2d2f1a4b2ce5c6b1bfc1656c1ddafcfe4ba8131"}, {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f352b62b45dff37b55ddd7b9c0c8672c4dd2eb9c0f9c11d395075a84e2c40f75"}, {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ab43061a0c81198d88f39aaf90dae9a7744620978f7ef3e3708339b8ed2ef01"}, {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9cb1565a7ad52e096a6988e2ee0397f72fe056dadf75d17fa6b5aebaea05622"}, {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:1b3ea7edd2d24538959c1c1abf97c744d879d4e541d38305f9bd7d9b10c9ec41"}, {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:7c7837fe8037e96b6dd5cfcf47263c1620a9d332a87ec06a6ca4564e56bd0f36"}, {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:3b90467ebc3d9fa5b0f9b6489dfb2c304a1db7b9946fa92aa76a831b9d587e99"}, {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:cab9401de3ea52b4b4c6971db5fb5c999bd4260898af972bf23de1c6b5dd9d71"}, {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d1f9282c5f2b5e241034a009779e7b2a1aa045f667ff521e7948ea9b56e0c5ff"}, {file = "aiohttp-3.8.4-cp36-cp36m-win32.whl", hash = "sha256:5e14f25765a578a0a634d5f0cd1e2c3f53964553a00347998dfdf96b8137f777"}, {file = "aiohttp-3.8.4-cp36-cp36m-win_amd64.whl", hash = "sha256:4c745b109057e7e5f1848c689ee4fb3a016c8d4d92da52b312f8a509f83aa05e"}, {file = "aiohttp-3.8.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:aede4df4eeb926c8fa70de46c340a1bc2c6079e1c40ccf7b0eae1313ffd33519"}, {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ddaae3f3d32fc2cb4c53fab020b69a05c8ab1f02e0e59665c6f7a0d3a5be54f"}, {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4eb3b82ca349cf6fadcdc7abcc8b3a50ab74a62e9113ab7a8ebc268aad35bb9"}, {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bcb89336efa095ea21b30f9e686763f2be4478f1b0a616969551982c4ee4c3b"}, {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c08e8ed6fa3d477e501ec9db169bfac8140e830aa372d77e4a43084d8dd91ab"}, {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6cd05ea06daca6ad6a4ca3ba7fe7dc5b5de063ff4daec6170ec0f9979f6c332"}, {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7a00a9ed8d6e725b55ef98b1b35c88013245f35f68b1b12c5cd4100dddac333"}, {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:de04b491d0e5007ee1b63a309956eaed959a49f5bb4e84b26c8f5d49de140fa9"}, {file = 
"aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:40653609b3bf50611356e6b6554e3a331f6879fa7116f3959b20e3528783e699"}, {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dbf3a08a06b3f433013c143ebd72c15cac33d2914b8ea4bea7ac2c23578815d6"}, {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854f422ac44af92bfe172d8e73229c270dc09b96535e8a548f99c84f82dde241"}, {file = "aiohttp-3.8.4-cp37-cp37m-win32.whl", hash = "sha256:aeb29c84bb53a84b1a81c6c09d24cf33bb8432cc5c39979021cc0f98c1292a1a"}, {file = "aiohttp-3.8.4-cp37-cp37m-win_amd64.whl", hash = "sha256:db3fc6120bce9f446d13b1b834ea5b15341ca9ff3f335e4a951a6ead31105480"}, {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fabb87dd8850ef0f7fe2b366d44b77d7e6fa2ea87861ab3844da99291e81e60f"}, {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:91f6d540163f90bbaef9387e65f18f73ffd7c79f5225ac3d3f61df7b0d01ad15"}, {file = "aiohttp-3.8.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d265f09a75a79a788237d7f9054f929ced2e69eb0bb79de3798c468d8a90f945"}, {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d89efa095ca7d442a6d0cbc755f9e08190ba40069b235c9886a8763b03785da"}, {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dac314662f4e2aa5009977b652d9b8db7121b46c38f2073bfeed9f4049732cd"}, {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe11310ae1e4cd560035598c3f29d86cef39a83d244c7466f95c27ae04850f10"}, {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ddb2a2026c3f6a68c3998a6c47ab6795e4127315d2e35a09997da21865757f8"}, {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e75b89ac3bd27d2d043b234aa7b734c38ba1b0e43f07787130a0ecac1e12228a"}, {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6e601588f2b502c93c30cd5a45bfc665faaf37bbe835b7cfd461753068232074"}, {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a5d794d1ae64e7753e405ba58e08fcfa73e3fad93ef9b7e31112ef3c9a0efb52"}, {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a1f4689c9a1462f3df0a1f7e797791cd6b124ddbee2b570d34e7f38ade0e2c71"}, {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3032dcb1c35bc330134a5b8a5d4f68c1a87252dfc6e1262c65a7e30e62298275"}, {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8189c56eb0ddbb95bfadb8f60ea1b22fcfa659396ea36f6adcc521213cd7b44d"}, {file = "aiohttp-3.8.4-cp38-cp38-win32.whl", hash = "sha256:33587f26dcee66efb2fff3c177547bd0449ab7edf1b73a7f5dea1e38609a0c54"}, {file = "aiohttp-3.8.4-cp38-cp38-win_amd64.whl", hash = "sha256:e595432ac259af2d4630008bf638873d69346372d38255774c0e286951e8b79f"}, {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5a7bdf9e57126dc345b683c3632e8ba317c31d2a41acd5800c10640387d193ed"}, {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:22f6eab15b6db242499a16de87939a342f5a950ad0abaf1532038e2ce7d31567"}, {file = "aiohttp-3.8.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7235604476a76ef249bd64cb8274ed24ccf6995c4a8b51a237005ee7a57e8643"}, {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea9eb976ffdd79d0e893869cfe179a8f60f152d42cb64622fca418cd9b18dc2a"}, {file = 
"aiohttp-3.8.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92c0cea74a2a81c4c76b62ea1cac163ecb20fb3ba3a75c909b9fa71b4ad493cf"}, {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:493f5bc2f8307286b7799c6d899d388bbaa7dfa6c4caf4f97ef7521b9cb13719"}, {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a63f03189a6fa7c900226e3ef5ba4d3bd047e18f445e69adbd65af433add5a2"}, {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10c8cefcff98fd9168cdd86c4da8b84baaa90bf2da2269c6161984e6737bf23e"}, {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bca5f24726e2919de94f047739d0a4fc01372801a3672708260546aa2601bf57"}, {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:03baa76b730e4e15a45f81dfe29a8d910314143414e528737f8589ec60cf7391"}, {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8c29c77cc57e40f84acef9bfb904373a4e89a4e8b74e71aa8075c021ec9078c2"}, {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:03543dcf98a6619254b409be2d22b51f21ec66272be4ebda7b04e6412e4b2e14"}, {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17b79c2963db82086229012cff93ea55196ed31f6493bb1ccd2c62f1724324e4"}, {file = "aiohttp-3.8.4-cp39-cp39-win32.whl", hash = "sha256:34ce9f93a4a68d1272d26030655dd1b58ff727b3ed2a33d80ec433561b03d67a"}, {file = "aiohttp-3.8.4-cp39-cp39-win_amd64.whl", hash = "sha256:41a86a69bb63bb2fc3dc9ad5ea9f10f1c9c8e282b471931be0268ddd09430b04"}, {file = "aiohttp-3.8.4.tar.gz", hash = "sha256:bf2e1a9162c1e441bf805a1fd166e249d574ca04e03b34f97e2928769e91ab5c"}, ] aiosignal = [ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, ] async-timeout = [ {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"}, {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, ] asynctest = [ {file = "asynctest-0.13.0-py3-none-any.whl", hash = "sha256:5da6118a7e6d6b54d83a8f7197769d046922a44d2a99c21382f0a6e4fadae676"}, {file = "asynctest-0.13.0.tar.gz", hash = "sha256:c27862842d15d83e6a34eb0b2866c323880eb3a75e4485b079ea11748fd77fac"}, ] atomicwrites = [ {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"}, ] attrs = [ {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, ] certifi = [ {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, ] charset-normalizer = [ {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, {file = 
"charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, {file = 
"charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, 
{file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, {file = 
"charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, ] colorama = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] coverage = [ {file = "coverage-7.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:49567ec91fc5e0b15356da07a2feabb421d62f52a9fff4b1ec40e9e19772f5f8"}, {file = "coverage-7.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d2ef6cae70168815ed91388948b5f4fcc69681480a0061114db737f957719f03"}, {file = "coverage-7.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3004765bca3acd9e015794e5c2f0c9a05587f5e698127ff95e9cfba0d3f29339"}, {file = "coverage-7.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cca7c0b7f5881dfe0291ef09ba7bb1582cb92ab0aeffd8afb00c700bf692415a"}, {file = "coverage-7.2.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2167d116309f564af56f9aa5e75ef710ef871c5f9b313a83050035097b56820"}, {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cb5f152fb14857cbe7f3e8c9a5d98979c4c66319a33cad6e617f0067c9accdc4"}, {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:87dc37f16fb5e3a28429e094145bf7c1753e32bb50f662722e378c5851f7fdc6"}, {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e191a63a05851f8bce77bc875e75457f9b01d42843f8bd7feed2fc26bbe60833"}, {file = "coverage-7.2.1-cp310-cp310-win32.whl", hash = "sha256:e3ea04b23b114572b98a88c85379e9e9ae031272ba1fb9b532aa934c621626d4"}, {file = "coverage-7.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:0cf557827be7eca1c38a2480484d706693e7bb1929e129785fe59ec155a59de6"}, {file = "coverage-7.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:570c21a29493b350f591a4b04c158ce1601e8d18bdcd21db136fbb135d75efa6"}, {file = "coverage-7.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9e872b082b32065ac2834149dc0adc2a2e6d8203080501e1e3c3c77851b466f9"}, {file = 
"coverage-7.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fac6343bae03b176e9b58104a9810df3cdccd5cfed19f99adfa807ffbf43cf9b"}, {file = "coverage-7.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abacd0a738e71b20e224861bc87e819ef46fedba2fb01bc1af83dfd122e9c319"}, {file = "coverage-7.2.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9256d4c60c4bbfec92721b51579c50f9e5062c21c12bec56b55292464873508"}, {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:80559eaf6c15ce3da10edb7977a1548b393db36cbc6cf417633eca05d84dd1ed"}, {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0bd7e628f6c3ec4e7d2d24ec0e50aae4e5ae95ea644e849d92ae4805650b4c4e"}, {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09643fb0df8e29f7417adc3f40aaf379d071ee8f0350ab290517c7004f05360b"}, {file = "coverage-7.2.1-cp311-cp311-win32.whl", hash = "sha256:1b7fb13850ecb29b62a447ac3516c777b0e7a09ecb0f4bb6718a8654c87dfc80"}, {file = "coverage-7.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:617a94ada56bbfe547aa8d1b1a2b8299e2ec1ba14aac1d4b26a9f7d6158e1273"}, {file = "coverage-7.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8649371570551d2fd7dee22cfbf0b61f1747cdfb2b7587bb551e4beaaa44cb97"}, {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d2b9b5e70a21474c105a133ba227c61bc95f2ac3b66861143ce39a5ea4b3f84"}, {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae82c988954722fa07ec5045c57b6d55bc1a0890defb57cf4a712ced65b26ddd"}, {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:861cc85dfbf55a7a768443d90a07e0ac5207704a9f97a8eb753292a7fcbdfcfc"}, {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0339dc3237c0d31c3b574f19c57985fcbe494280153bbcad33f2cdf469f4ac3e"}, {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5928b85416a388dd557ddc006425b0c37e8468bd1c3dc118c1a3de42f59e2a54"}, {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8d3843ca645f62c426c3d272902b9de90558e9886f15ddf5efe757b12dd376f5"}, {file = "coverage-7.2.1-cp37-cp37m-win32.whl", hash = "sha256:6a034480e9ebd4e83d1aa0453fd78986414b5d237aea89a8fdc35d330aa13bae"}, {file = "coverage-7.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6fce673f79a0e017a4dc35e18dc7bb90bf6d307c67a11ad5e61ca8d42b87cbff"}, {file = "coverage-7.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7f099da6958ddfa2ed84bddea7515cb248583292e16bb9231d151cd528eab657"}, {file = "coverage-7.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:97a3189e019d27e914ecf5c5247ea9f13261d22c3bb0cfcfd2a9b179bb36f8b1"}, {file = "coverage-7.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a81dbcf6c6c877986083d00b834ac1e84b375220207a059ad45d12f6e518a4e3"}, {file = "coverage-7.2.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d2c3dde4c0b9be4b02067185136b7ee4681978228ad5ec1278fa74f5ca3e99"}, {file = "coverage-7.2.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a209d512d157379cc9ab697cbdbb4cfd18daa3e7eebaa84c3d20b6af0037384"}, {file = 
"coverage-7.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f3d07edb912a978915576a776756069dede66d012baa503022d3a0adba1b6afa"}, {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8dca3c1706670297851bca1acff9618455122246bdae623be31eca744ade05ec"}, {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b1991a6d64231a3e5bbe3099fb0dd7c9aeaa4275ad0e0aeff4cb9ef885c62ba2"}, {file = "coverage-7.2.1-cp38-cp38-win32.whl", hash = "sha256:22c308bc508372576ffa3d2dbc4824bb70d28eeb4fcd79d4d1aed663a06630d0"}, {file = "coverage-7.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:b0c0d46de5dd97f6c2d1b560bf0fcf0215658097b604f1840365296302a9d1fb"}, {file = "coverage-7.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4dd34a935de268a133e4741827ae951283a28c0125ddcdbcbba41c4b98f2dfef"}, {file = "coverage-7.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0f8318ed0f3c376cfad8d3520f496946977abde080439d6689d7799791457454"}, {file = "coverage-7.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:834c2172edff5a08d78e2f53cf5e7164aacabeb66b369f76e7bb367ca4e2d993"}, {file = "coverage-7.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4d70c853f0546855f027890b77854508bdb4d6a81242a9d804482e667fff6e6"}, {file = "coverage-7.2.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a6450da4c7afc4534305b2b7d8650131e130610cea448ff240b6ab73d7eab63"}, {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:99f4dd81b2bb8fc67c3da68b1f5ee1650aca06faa585cbc6818dbf67893c6d58"}, {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bdd3f2f285ddcf2e75174248b2406189261a79e7fedee2ceeadc76219b6faa0e"}, {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f29351393eb05e6326f044a7b45ed8e38cb4dcc38570d12791f271399dc41431"}, {file = "coverage-7.2.1-cp39-cp39-win32.whl", hash = "sha256:e2b50ebc2b6121edf352336d503357321b9d8738bb7a72d06fc56153fd3f4cd8"}, {file = "coverage-7.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:bd5a12239c0006252244f94863f1c518ac256160cd316ea5c47fb1a11b25889a"}, {file = "coverage-7.2.1-pp37.pp38.pp39-none-any.whl", hash = "sha256:436313d129db7cf5b4ac355dd2bd3f7c7e5294af077b090b85de75f8458b8616"}, {file = "coverage-7.2.1.tar.gz", hash = "sha256:c77f2a9093ccf329dd523a9b2b3c854c20d2a3d968b6def3b820272ca6732242"}, ] filelock = [ {file = "filelock-3.10.0-py3-none-any.whl", hash = "sha256:e90b34656470756edf8b19656785c5fea73afa1953f3e1b0d645cef11cab3182"}, {file = "filelock-3.10.0.tar.gz", hash = "sha256:3199fd0d3faea8b911be52b663dfccceb84c95949dd13179aa21436d1a79c4ce"}, ] frozenlist = [ {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff8bf625fe85e119553b5383ba0fb6aa3d0ec2ae980295aaefa552374926b3f4"}, {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dfbac4c2dfcc082fcf8d942d1e49b6aa0766c19d3358bd86e2000bf0fa4a9cf0"}, {file = "frozenlist-1.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b1c63e8d377d039ac769cd0926558bb7068a1f7abb0f003e3717ee003ad85530"}, {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fdfc24dcfce5b48109867c13b4cb15e4660e7bd7661741a391f821f23dfdca7"}, {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c926450857408e42f0bbc295e84395722ce74bae69a3b2aa2a65fe22cb14b99"}, {file = 
"frozenlist-1.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1841e200fdafc3d51f974d9d377c079a0694a8f06de2e67b48150328d66d5483"}, {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f470c92737afa7d4c3aacc001e335062d582053d4dbe73cda126f2d7031068dd"}, {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:783263a4eaad7c49983fe4b2e7b53fa9770c136c270d2d4bbb6d2192bf4d9caf"}, {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:924620eef691990dfb56dc4709f280f40baee568c794b5c1885800c3ecc69816"}, {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ae4dc05c465a08a866b7a1baf360747078b362e6a6dbeb0c57f234db0ef88ae0"}, {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bed331fe18f58d844d39ceb398b77d6ac0b010d571cba8267c2e7165806b00ce"}, {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:02c9ac843e3390826a265e331105efeab489ffaf4dd86384595ee8ce6d35ae7f"}, {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9545a33965d0d377b0bc823dcabf26980e77f1b6a7caa368a365a9497fb09420"}, {file = "frozenlist-1.3.3-cp310-cp310-win32.whl", hash = "sha256:d5cd3ab21acbdb414bb6c31958d7b06b85eeb40f66463c264a9b343a4e238642"}, {file = "frozenlist-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:b756072364347cb6aa5b60f9bc18e94b2f79632de3b0190253ad770c5df17db1"}, {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b4395e2f8d83fbe0c627b2b696acce67868793d7d9750e90e39592b3626691b7"}, {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14143ae966a6229350021384870458e4777d1eae4c28d1a7aa47f24d030e6678"}, {file = "frozenlist-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5d8860749e813a6f65bad8285a0520607c9500caa23fea6ee407e63debcdbef6"}, {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23d16d9f477bb55b6154654e0e74557040575d9d19fe78a161bd33d7d76808e8"}, {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb82dbba47a8318e75f679690190c10a5e1f447fbf9df41cbc4c3afd726d88cb"}, {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9309869032abb23d196cb4e4db574232abe8b8be1339026f489eeb34a4acfd91"}, {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a97b4fe50b5890d36300820abd305694cb865ddb7885049587a5678215782a6b"}, {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c188512b43542b1e91cadc3c6c915a82a5eb95929134faf7fd109f14f9892ce4"}, {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:303e04d422e9b911a09ad499b0368dc551e8c3cd15293c99160c7f1f07b59a48"}, {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0771aed7f596c7d73444c847a1c16288937ef988dc04fb9f7be4b2aa91db609d"}, {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:66080ec69883597e4d026f2f71a231a1ee9887835902dbe6b6467d5a89216cf6"}, {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:41fe21dc74ad3a779c3d73a2786bdf622ea81234bdd4faf90b8b03cad0c2c0b4"}, {file = 
"frozenlist-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f20380df709d91525e4bee04746ba612a4df0972c1b8f8e1e8af997e678c7b81"}, {file = "frozenlist-1.3.3-cp311-cp311-win32.whl", hash = "sha256:f30f1928162e189091cf4d9da2eac617bfe78ef907a761614ff577ef4edfb3c8"}, {file = "frozenlist-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a6394d7dadd3cfe3f4b3b186e54d5d8504d44f2d58dcc89d693698e8b7132b32"}, {file = "frozenlist-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8df3de3a9ab8325f94f646609a66cbeeede263910c5c0de0101079ad541af332"}, {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0693c609e9742c66ba4870bcee1ad5ff35462d5ffec18710b4ac89337ff16e27"}, {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd4210baef299717db0a600d7a3cac81d46ef0e007f88c9335db79f8979c0d3d"}, {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:394c9c242113bfb4b9aa36e2b80a05ffa163a30691c7b5a29eba82e937895d5e"}, {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6327eb8e419f7d9c38f333cde41b9ae348bec26d840927332f17e887a8dcb70d"}, {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e24900aa13212e75e5b366cb9065e78bbf3893d4baab6052d1aca10d46d944c"}, {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3843f84a6c465a36559161e6c59dce2f2ac10943040c2fd021cfb70d58c4ad56"}, {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:84610c1502b2461255b4c9b7d5e9c48052601a8957cd0aea6ec7a7a1e1fb9420"}, {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:c21b9aa40e08e4f63a2f92ff3748e6b6c84d717d033c7b3438dd3123ee18f70e"}, {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:efce6ae830831ab6a22b9b4091d411698145cb9b8fc869e1397ccf4b4b6455cb"}, {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:40de71985e9042ca00b7953c4f41eabc3dc514a2d1ff534027f091bc74416401"}, {file = "frozenlist-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:180c00c66bde6146a860cbb81b54ee0df350d2daf13ca85b275123bbf85de18a"}, {file = "frozenlist-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9bbbcedd75acdfecf2159663b87f1bb5cfc80e7cd99f7ddd9d66eb98b14a8411"}, {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:034a5c08d36649591be1cbb10e09da9f531034acfe29275fc5454a3b101ce41a"}, {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba64dc2b3b7b158c6660d49cdb1d872d1d0bf4e42043ad8d5006099479a194e5"}, {file = "frozenlist-1.3.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47df36a9fe24054b950bbc2db630d508cca3aa27ed0566c0baf661225e52c18e"}, {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:008a054b75d77c995ea26629ab3a0c0d7281341f2fa7e1e85fa6153ae29ae99c"}, {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:841ea19b43d438a80b4de62ac6ab21cfe6827bb8a9dc62b896acc88eaf9cecba"}, {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e235688f42b36be2b6b06fc37ac2126a73b75fb8d6bc66dd632aa35286238703"}, {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:ca713d4af15bae6e5d79b15c10c8522859a9a89d3b361a50b817c98c2fb402a2"}, {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ac5995f2b408017b0be26d4a1d7c61bce106ff3d9e3324374d66b5964325448"}, {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4ae8135b11652b08a8baf07631d3ebfe65a4c87909dbef5fa0cdde440444ee4"}, {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4ea42116ceb6bb16dbb7d526e242cb6747b08b7710d9782aa3d6732bd8d27649"}, {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:810860bb4bdce7557bc0febb84bbd88198b9dbc2022d8eebe5b3590b2ad6c842"}, {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ee78feb9d293c323b59a6f2dd441b63339a30edf35abcb51187d2fc26e696d13"}, {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0af2e7c87d35b38732e810befb9d797a99279cbb85374d42ea61c1e9d23094b3"}, {file = "frozenlist-1.3.3-cp38-cp38-win32.whl", hash = "sha256:899c5e1928eec13fd6f6d8dc51be23f0d09c5281e40d9cf4273d188d9feeaf9b"}, {file = "frozenlist-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:7f44e24fa70f6fbc74aeec3e971f60a14dde85da364aa87f15d1be94ae75aeef"}, {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2b07ae0c1edaa0a36339ec6cce700f51b14a3fc6545fdd32930d2c83917332cf"}, {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ebb86518203e12e96af765ee89034a1dbb0c3c65052d1b0c19bbbd6af8a145e1"}, {file = "frozenlist-1.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5cf820485f1b4c91e0417ea0afd41ce5cf5965011b3c22c400f6d144296ccbc0"}, {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c11e43016b9024240212d2a65043b70ed8dfd3b52678a1271972702d990ac6d"}, {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa3c6e3305aa1146b59a09b32b2e04074945ffcfb2f0931836d103a2c38f936"}, {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:352bd4c8c72d508778cf05ab491f6ef36149f4d0cb3c56b1b4302852255d05d5"}, {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65a5e4d3aa679610ac6e3569e865425b23b372277f89b5ef06cf2cdaf1ebf22b"}, {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e2c1185858d7e10ff045c496bbf90ae752c28b365fef2c09cf0fa309291669"}, {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f163d2fd041c630fed01bc48d28c3ed4a3b003c00acd396900e11ee5316b56bb"}, {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:05cdb16d09a0832eedf770cb7bd1fe57d8cf4eaf5aced29c4e41e3f20b30a784"}, {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8bae29d60768bfa8fb92244b74502b18fae55a80eac13c88eb0b496d4268fd2d"}, {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eedab4c310c0299961ac285591acd53dc6723a1ebd90a57207c71f6e0c2153ab"}, {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3bbdf44855ed8f0fbcd102ef05ec3012d6a4fd7c7562403f76ce6a52aeffb2b1"}, {file = "frozenlist-1.3.3-cp39-cp39-win32.whl", hash = "sha256:efa568b885bca461f7c7b9e032655c0c143d305bf01c30caf6db2854a4532b38"}, {file = "frozenlist-1.3.3-cp39-cp39-win_amd64.whl", hash = 
"sha256:cfe33efc9cb900a4c46f91a5ceba26d6df370ffddd9ca386eb1d4f0ad97b9ea9"}, {file = "frozenlist-1.3.3.tar.gz", hash = "sha256:58bcc55721e8a90b88332d6cd441261ebb22342e238296bb330968952fbb3a6a"}, ] huggingface-hub = [ {file = "huggingface_hub-0.13.2-py3-none-any.whl", hash = "sha256:745c4cbd97a27fc5c1c6c89cb477662004c88bc3dd89bafc1a27ef24af77f944"}, {file = "huggingface_hub-0.13.2.tar.gz", hash = "sha256:246e8eb39b6e6e9d9d5846e4b56c265cdf1872f48ba5a13a1321295d371626f5"}, ] idna = [ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, ] importlib-metadata = [ {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"}, {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"}, ] iniconfig = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] multidict = [ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = 
"sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, {file = 
"multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, ] packaging = [ {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, ] pluggy = [ {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, ] py = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, ] pydantic = [ {file = "pydantic-1.10.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f9289065611c48147c1dd1fd344e9d57ab45f1d99b0fb26c51f1cf72cd9bcd31"}, {file = "pydantic-1.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c32b6bba301490d9bb2bf5f631907803135e8085b6aa3e5fe5a770d46dd0160"}, {file = "pydantic-1.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd9b9e98068fa1068edfc9eabde70a7132017bdd4f362f8b4fd0abed79c33083"}, {file = "pydantic-1.10.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c84583b9df62522829cbc46e2b22e0ec11445625b5acd70c5681ce09c9b11c4"}, {file = 
"pydantic-1.10.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b41822064585fea56d0116aa431fbd5137ce69dfe837b599e310034171996084"}, {file = "pydantic-1.10.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:61f1f08adfaa9cc02e0cbc94f478140385cbd52d5b3c5a657c2fceb15de8d1fb"}, {file = "pydantic-1.10.6-cp310-cp310-win_amd64.whl", hash = "sha256:32937835e525d92c98a1512218db4eed9ddc8f4ee2a78382d77f54341972c0e7"}, {file = "pydantic-1.10.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bbd5c531b22928e63d0cb1868dee76123456e1de2f1cb45879e9e7a3f3f1779b"}, {file = "pydantic-1.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e277bd18339177daa62a294256869bbe84df1fb592be2716ec62627bb8d7c81d"}, {file = "pydantic-1.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f15277d720aa57e173954d237628a8d304896364b9de745dcb722f584812c7"}, {file = "pydantic-1.10.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b243b564cea2576725e77aeeda54e3e0229a168bc587d536cd69941e6797543d"}, {file = "pydantic-1.10.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3ce13a558b484c9ae48a6a7c184b1ba0e5588c5525482681db418268e5f86186"}, {file = "pydantic-1.10.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3ac1cd4deed871dfe0c5f63721e29debf03e2deefa41b3ed5eb5f5df287c7b70"}, {file = "pydantic-1.10.6-cp311-cp311-win_amd64.whl", hash = "sha256:b1eb6610330a1dfba9ce142ada792f26bbef1255b75f538196a39e9e90388bf4"}, {file = "pydantic-1.10.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4ca83739c1263a044ec8b79df4eefc34bbac87191f0a513d00dd47d46e307a65"}, {file = "pydantic-1.10.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea4e2a7cb409951988e79a469f609bba998a576e6d7b9791ae5d1e0619e1c0f2"}, {file = "pydantic-1.10.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53de12b4608290992a943801d7756f18a37b7aee284b9ffa794ee8ea8153f8e2"}, {file = "pydantic-1.10.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:60184e80aac3b56933c71c48d6181e630b0fbc61ae455a63322a66a23c14731a"}, {file = "pydantic-1.10.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:415a3f719ce518e95a92effc7ee30118a25c3d032455d13e121e3840985f2efd"}, {file = "pydantic-1.10.6-cp37-cp37m-win_amd64.whl", hash = "sha256:72cb30894a34d3a7ab6d959b45a70abac8a2a93b6480fc5a7bfbd9c935bdc4fb"}, {file = "pydantic-1.10.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3091d2eaeda25391405e36c2fc2ed102b48bac4b384d42b2267310abae350ca6"}, {file = "pydantic-1.10.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:751f008cd2afe812a781fd6aa2fb66c620ca2e1a13b6a2152b1ad51553cb4b77"}, {file = "pydantic-1.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12e837fd320dd30bd625be1b101e3b62edc096a49835392dcf418f1a5ac2b832"}, {file = "pydantic-1.10.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d92831d0115874d766b1f5fddcdde0c5b6c60f8c6111a394078ec227fca6d"}, {file = "pydantic-1.10.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:476f6674303ae7965730a382a8e8d7fae18b8004b7b69a56c3d8fa93968aa21c"}, {file = "pydantic-1.10.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3a2be0a0f32c83265fd71a45027201e1278beaa82ea88ea5b345eea6afa9ac7f"}, {file = "pydantic-1.10.6-cp38-cp38-win_amd64.whl", hash = "sha256:0abd9c60eee6201b853b6c4be104edfba4f8f6c5f3623f8e1dba90634d63eb35"}, {file = 
"pydantic-1.10.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6195ca908045054dd2d57eb9c39a5fe86409968b8040de8c2240186da0769da7"}, {file = "pydantic-1.10.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:43cdeca8d30de9a897440e3fb8866f827c4c31f6c73838e3a01a14b03b067b1d"}, {file = "pydantic-1.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c19eb5163167489cb1e0161ae9220dadd4fc609a42649e7e84a8fa8fff7a80f"}, {file = "pydantic-1.10.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:012c99a9c0d18cfde7469aa1ebff922e24b0c706d03ead96940f5465f2c9cf62"}, {file = "pydantic-1.10.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:528dcf7ec49fb5a84bf6fe346c1cc3c55b0e7603c2123881996ca3ad79db5bfc"}, {file = "pydantic-1.10.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:163e79386c3547c49366e959d01e37fc30252285a70619ffc1b10ede4758250a"}, {file = "pydantic-1.10.6-cp39-cp39-win_amd64.whl", hash = "sha256:189318051c3d57821f7233ecc94708767dd67687a614a4e8f92b4a020d4ffd06"}, {file = "pydantic-1.10.6-py3-none-any.whl", hash = "sha256:acc6783751ac9c9bc4680379edd6d286468a1dc8d7d9906cd6f1186ed682b2b0"}, {file = "pydantic-1.10.6.tar.gz", hash = "sha256:cf95adb0d1671fc38d8c43dd921ad5814a735e7d9b4d9e437c088002863854fd"}, ] pytest = [ {file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"}, {file = "pytest-6.2.5.tar.gz", hash = "sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89"}, ] pytest-asyncio = [ {file = "pytest-asyncio-0.17.2.tar.gz", hash = "sha256:6d895b02432c028e6957d25fc936494e78c6305736e785d9fee408b1efbc7ff4"}, {file = "pytest_asyncio-0.17.2-py3-none-any.whl", hash = "sha256:e0fe5dbea40516b661ef1bcfe0bd9461c2847c4ef4bb40012324f2454fb7d56d"}, ] pytest-cov = [ {file = "pytest-cov-3.0.0.tar.gz", hash = "sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470"}, {file = "pytest_cov-3.0.0-py3-none-any.whl", hash = "sha256:578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6"}, ] PyYAML = [ {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, ] requests = [ {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, ] toml = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] tomli = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] tqdm = [ {file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"}, {file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"}, ] typing-extensions = [ {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, ] urllib3 = [ {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, ] yarl = [ {file = "yarl-1.8.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bb81f753c815f6b8e2ddd2eef3c855cf7da193b82396ac013c661aaa6cc6b0a5"}, {file = "yarl-1.8.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:47d49ac96156f0928f002e2424299b2c91d9db73e08c4cd6742923a086f1c863"}, {file = "yarl-1.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3fc056e35fa6fba63248d93ff6e672c096f95f7836938241ebc8260e062832fe"}, {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58a3c13d1c3005dbbac5c9f0d3210b60220a65a999b1833aa46bd6677c69b08e"}, {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10b08293cda921157f1e7c2790999d903b3fd28cd5c208cf8826b3b508026996"}, {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:de986979bbd87272fe557e0a8fcb66fd40ae2ddfe28a8b1ce4eae22681728fef"}, {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c4fcfa71e2c6a3cb568cf81aadc12768b9995323186a10827beccf5fa23d4f8"}, {file = "yarl-1.8.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae4d7ff1049f36accde9e1ef7301912a751e5bae0a9d142459646114c70ecba6"}, {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bf071f797aec5b96abfc735ab97da9fd8f8768b43ce2abd85356a3127909d146"}, {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:74dece2bfc60f0f70907c34b857ee98f2c6dd0f75185db133770cd67300d505f"}, {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:df60a94d332158b444301c7f569659c926168e4d4aad2cfbf4bce0e8fb8be826"}, {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:63243b21c6e28ec2375f932a10ce7eda65139b5b854c0f6b82ed945ba526bff3"}, {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cfa2bbca929aa742b5084fd4663dd4b87c191c844326fcb21c3afd2d11497f80"}, {file = "yarl-1.8.2-cp310-cp310-win32.whl", hash = "sha256:b05df9ea7496df11b710081bd90ecc3a3db6adb4fee36f6a411e7bc91a18aa42"}, {file = "yarl-1.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:24ad1d10c9db1953291f56b5fe76203977f1ed05f82d09ec97acb623a7976574"}, {file = "yarl-1.8.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2a1fca9588f360036242f379bfea2b8b44cae2721859b1c56d033adfd5893634"}, {file = "yarl-1.8.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f37db05c6051eff17bc832914fe46869f8849de5b92dc4a3466cd63095d23dfd"}, {file = "yarl-1.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:77e913b846a6b9c5f767b14dc1e759e5aff05502fe73079f6f4176359d832581"}, {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0978f29222e649c351b173da2b9b4665ad1feb8d1daa9d971eb90df08702668a"}, {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:388a45dc77198b2460eac0aca1efd6a7c09e976ee768b0d5109173e521a19daf"}, {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2305517e332a862ef75be8fad3606ea10108662bc6fe08509d5ca99503ac2aee"}, {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42430ff511571940d51e75cf42f1e4dbdded477e71c1b7a17f4da76c1da8ea76"}, {file = "yarl-1.8.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3150078118f62371375e1e69b13b48288e44f6691c1069340081c3fd12c94d5b"}, {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c15163b6125db87c8f53c98baa5e785782078fbd2dbeaa04c6141935eb6dab7a"}, {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4d04acba75c72e6eb90745447d69f84e6c9056390f7a9724605ca9c56b4afcc6"}, {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e7fd20d6576c10306dea2d6a5765f46f0ac5d6f53436217913e952d19237efc4"}, {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:75c16b2a900b3536dfc7014905a128a2bea8fb01f9ee26d2d7d8db0a08e7cb2c"}, {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6d88056a04860a98341a0cf53e950e3ac9f4e51d1b6f61a53b0609df342cc8b2"}, {file = "yarl-1.8.2-cp311-cp311-win32.whl", hash = "sha256:fb742dcdd5eec9f26b61224c23baea46c9055cf16f62475e11b9b15dfd5c117b"}, {file = 
"yarl-1.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:8c46d3d89902c393a1d1e243ac847e0442d0196bbd81aecc94fcebbc2fd5857c"}, {file = "yarl-1.8.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ceff9722e0df2e0a9e8a79c610842004fa54e5b309fe6d218e47cd52f791d7ef"}, {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f6b4aca43b602ba0f1459de647af954769919c4714706be36af670a5f44c9c1"}, {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1684a9bd9077e922300ecd48003ddae7a7474e0412bea38d4631443a91d61077"}, {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ebb78745273e51b9832ef90c0898501006670d6e059f2cdb0e999494eb1450c2"}, {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3adeef150d528ded2a8e734ebf9ae2e658f4c49bf413f5f157a470e17a4a2e89"}, {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57a7c87927a468e5a1dc60c17caf9597161d66457a34273ab1760219953f7f4c"}, {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:efff27bd8cbe1f9bd127e7894942ccc20c857aa8b5a0327874f30201e5ce83d0"}, {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a783cd344113cb88c5ff7ca32f1f16532a6f2142185147822187913eb989f739"}, {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:705227dccbe96ab02c7cb2c43e1228e2826e7ead880bb19ec94ef279e9555b5b"}, {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:34c09b43bd538bf6c4b891ecce94b6fa4f1f10663a8d4ca589a079a5018f6ed7"}, {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a48f4f7fea9a51098b02209d90297ac324241bf37ff6be6d2b0149ab2bd51b37"}, {file = "yarl-1.8.2-cp37-cp37m-win32.whl", hash = "sha256:0414fd91ce0b763d4eadb4456795b307a71524dbacd015c657bb2a39db2eab89"}, {file = "yarl-1.8.2-cp37-cp37m-win_amd64.whl", hash = "sha256:d881d152ae0007809c2c02e22aa534e702f12071e6b285e90945aa3c376463c5"}, {file = "yarl-1.8.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5df5e3d04101c1e5c3b1d69710b0574171cc02fddc4b23d1b2813e75f35a30b1"}, {file = "yarl-1.8.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7a66c506ec67eb3159eea5096acd05f5e788ceec7b96087d30c7d2865a243918"}, {file = "yarl-1.8.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2b4fa2606adf392051d990c3b3877d768771adc3faf2e117b9de7eb977741229"}, {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e21fb44e1eff06dd6ef971d4bdc611807d6bd3691223d9c01a18cec3677939e"}, {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93202666046d9edadfe9f2e7bf5e0782ea0d497b6d63da322e541665d65a044e"}, {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fc77086ce244453e074e445104f0ecb27530d6fd3a46698e33f6c38951d5a0f1"}, {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dd68a92cab699a233641f5929a40f02a4ede8c009068ca8aa1fe87b8c20ae3"}, {file = "yarl-1.8.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b372aad2b5f81db66ee7ec085cbad72c4da660d994e8e590c997e9b01e44901"}, {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e6f3515aafe0209dd17fb9bdd3b4e892963370b3de781f53e1746a521fb39fc0"}, {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:dfef7350ee369197106805e193d420b75467b6cceac646ea5ed3049fcc950a05"}, {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:728be34f70a190566d20aa13dc1f01dc44b6aa74580e10a3fb159691bc76909d"}, {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ff205b58dc2929191f68162633d5e10e8044398d7a45265f90a0f1d51f85f72c"}, {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baf211dcad448a87a0d9047dc8282d7de59473ade7d7fdf22150b1d23859f946"}, {file = "yarl-1.8.2-cp38-cp38-win32.whl", hash = "sha256:272b4f1599f1b621bf2aabe4e5b54f39a933971f4e7c9aa311d6d7dc06965165"}, {file = "yarl-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:326dd1d3caf910cd26a26ccbfb84c03b608ba32499b5d6eeb09252c920bcbe4f"}, {file = "yarl-1.8.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f8ca8ad414c85bbc50f49c0a106f951613dfa5f948ab69c10ce9b128d368baf8"}, {file = "yarl-1.8.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:418857f837347e8aaef682679f41e36c24250097f9e2f315d39bae3a99a34cbf"}, {file = "yarl-1.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ae0eec05ab49e91a78700761777f284c2df119376e391db42c38ab46fd662b77"}, {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:009a028127e0a1755c38b03244c0bea9d5565630db9c4cf9572496e947137a87"}, {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3edac5d74bb3209c418805bda77f973117836e1de7c000e9755e572c1f7850d0"}, {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da65c3f263729e47351261351b8679c6429151ef9649bba08ef2528ff2c423b2"}, {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef8fb25e52663a1c85d608f6dd72e19bd390e2ecaf29c17fb08f730226e3a08"}, {file = "yarl-1.8.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcd7bb1e5c45274af9a1dd7494d3c52b2be5e6bd8d7e49c612705fd45420b12d"}, {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44ceac0450e648de86da8e42674f9b7077d763ea80c8ceb9d1c3e41f0f0a9951"}, {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:97209cc91189b48e7cfe777237c04af8e7cc51eb369004e061809bcdf4e55220"}, {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:48dd18adcf98ea9cd721a25313aef49d70d413a999d7d89df44f469edfb38a06"}, {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e59399dda559688461762800d7fb34d9e8a6a7444fd76ec33220a926c8be1516"}, {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d617c241c8c3ad5c4e78a08429fa49e4b04bedfc507b34b4d8dceb83b4af3588"}, {file = "yarl-1.8.2-cp39-cp39-win32.whl", hash = "sha256:cb6d48d80a41f68de41212f3dfd1a9d9898d7841c8f7ce6696cf2fd9cb57ef83"}, {file = "yarl-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:6604711362f2dbf7160df21c416f81fac0de6dbcf0b5445a2ef25478ecc4c778"}, {file = "yarl-1.8.2.tar.gz", hash = "sha256:49d43402c6e3013ad0978602bf6bf5328535c48d192304b91b97a3c6790b1562"}, ] zipp = [ {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, ]
0
hf_public_repos/text-generation-inference/clients
hf_public_repos/text-generation-inference/clients/python/pyproject.toml
[tool.poetry]
name = "text-generation"
version = "0.6.0"
description = "Hugging Face Text Generation Python Client"
license = "Apache-2.0"
authors = ["Olivier Dehaene <olivier@huggingface.co>"]
maintainers = ["Olivier Dehaene <olivier@huggingface.co>"]
readme = "README.md"
homepage = "https://github.com/huggingface/text-generation-inference"
repository = "https://github.com/huggingface/text-generation-inference"

[tool.poetry.dependencies]
python = "^3.7"
pydantic = "^1.10"
aiohttp = "^3.8"
huggingface-hub = ">= 0.12, < 1.0"

[tool.poetry.dev-dependencies]
pytest = "^6.2.5"
pytest-asyncio = "^0.17.2"
pytest-cov = "^3.0.0"

[tool.pytest.ini_options]
asyncio_mode = "auto"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
0
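The pyproject.toml above declares pydantic v1, aiohttp, and huggingface-hub as the client's only runtime dependencies, targeting Python 3.7+. A minimal usage sketch, assuming the package is installed (for example via `pip install text-generation`) and that a text-generation-inference server is reachable; the URL below is a placeholder, not something taken from the project files:

from text_generation import Client

# Hypothetical server address; substitute a real text-generation-inference endpoint.
client = Client("http://127.0.0.1:8080")
print(client.generate("Why is the sky blue?", max_new_tokens=20).generated_text)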
hf_public_repos/text-generation-inference/clients/python
hf_public_repos/text-generation-inference/clients/python/tests/conftest.py
import pytest

from text_generation import __version__
from huggingface_hub.utils import build_hf_headers


@pytest.fixture
def flan_t5_xxl():
    return "google/flan-t5-xxl"


@pytest.fixture
def fake_model():
    return "fake/model"


@pytest.fixture
def unsupported_model():
    return "gpt2"


@pytest.fixture
def base_url():
    return "https://api-inference.huggingface.co/models"


@pytest.fixture
def bloom_model():
    # Assumed fixture: `bloom_url` below depends on a `bloom_model` fixture
    # that was otherwise undefined in this file; the value is a guess.
    return "bigscience/bloom"


@pytest.fixture
def bloom_url(base_url, bloom_model):
    return f"{base_url}/{bloom_model}"


@pytest.fixture
def flan_t5_xxl_url(base_url, flan_t5_xxl):
    return f"{base_url}/{flan_t5_xxl}"


@pytest.fixture
def fake_url(base_url, fake_model):
    return f"{base_url}/{fake_model}"


@pytest.fixture
def unsupported_url(base_url, unsupported_model):
    return f"{base_url}/{unsupported_model}"


@pytest.fixture(scope="session")
def hf_headers():
    return build_hf_headers(
        library_name="text-generation-tests", library_version=__version__
    )
0
hf_public_repos/text-generation-inference/clients/python
hf_public_repos/text-generation-inference/clients/python/tests/test_client.py
import pytest

from text_generation import Client, AsyncClient
from text_generation.errors import NotFoundError, ValidationError
from text_generation.types import FinishReason, InputToken


def test_generate(flan_t5_xxl_url, hf_headers):
    client = Client(flan_t5_xxl_url, hf_headers)
    response = client.generate("test", max_new_tokens=1, decoder_input_details=True)

    assert response.generated_text == ""
    assert response.details.finish_reason == FinishReason.Length
    assert response.details.generated_tokens == 1
    assert response.details.seed is None
    assert len(response.details.prefill) == 1
    assert response.details.prefill[0] == InputToken(id=0, text="<pad>", logprob=None)
    assert len(response.details.tokens) == 1
    assert response.details.tokens[0].id == 3
    assert response.details.tokens[0].text == " "
    assert not response.details.tokens[0].special


def test_generate_best_of(flan_t5_xxl_url, hf_headers):
    client = Client(flan_t5_xxl_url, hf_headers)
    response = client.generate(
        "test", max_new_tokens=1, best_of=2, do_sample=True, decoder_input_details=True
    )

    assert response.details.seed is not None
    assert response.details.best_of_sequences is not None
    assert len(response.details.best_of_sequences) == 1
    assert response.details.best_of_sequences[0].seed is not None


def test_generate_not_found(fake_url, hf_headers):
    client = Client(fake_url, hf_headers)
    with pytest.raises(NotFoundError):
        client.generate("test")


def test_generate_validation_error(flan_t5_xxl_url, hf_headers):
    client = Client(flan_t5_xxl_url, hf_headers)
    with pytest.raises(ValidationError):
        client.generate("test", max_new_tokens=10_000)


def test_generate_stream(flan_t5_xxl_url, hf_headers):
    client = Client(flan_t5_xxl_url, hf_headers)
    responses = [
        response for response in client.generate_stream("test", max_new_tokens=1)
    ]

    assert len(responses) == 1
    response = responses[0]

    assert response.generated_text == ""
    assert response.details.finish_reason == FinishReason.Length
    assert response.details.generated_tokens == 1
    assert response.details.seed is None


def test_generate_stream_not_found(fake_url, hf_headers):
    client = Client(fake_url, hf_headers)
    with pytest.raises(NotFoundError):
        list(client.generate_stream("test"))


def test_generate_stream_validation_error(flan_t5_xxl_url, hf_headers):
    client = Client(flan_t5_xxl_url, hf_headers)
    with pytest.raises(ValidationError):
        list(client.generate_stream("test", max_new_tokens=10_000))


@pytest.mark.asyncio
async def test_generate_async(flan_t5_xxl_url, hf_headers):
    client = AsyncClient(flan_t5_xxl_url, hf_headers)
    response = await client.generate(
        "test", max_new_tokens=1, decoder_input_details=True
    )

    assert response.generated_text == ""
    assert response.details.finish_reason == FinishReason.Length
    assert response.details.generated_tokens == 1
    assert response.details.seed is None
    assert len(response.details.prefill) == 1
    assert response.details.prefill[0] == InputToken(id=0, text="<pad>", logprob=None)
    assert len(response.details.tokens) == 1
    assert response.details.tokens[0].id == 3
    assert response.details.tokens[0].text == " "
    assert not response.details.tokens[0].special


@pytest.mark.asyncio
async def test_generate_async_best_of(flan_t5_xxl_url, hf_headers):
    client = AsyncClient(flan_t5_xxl_url, hf_headers)
    response = await client.generate(
        "test", max_new_tokens=1, best_of=2, do_sample=True, decoder_input_details=True
    )

    assert response.details.seed is not None
    assert response.details.best_of_sequences is not None
    assert len(response.details.best_of_sequences) == 1
    assert response.details.best_of_sequences[0].seed is not None


@pytest.mark.asyncio
async def test_generate_async_not_found(fake_url, hf_headers):
    client = AsyncClient(fake_url, hf_headers)
    with pytest.raises(NotFoundError):
        await client.generate("test")


@pytest.mark.asyncio
async def test_generate_async_validation_error(flan_t5_xxl_url, hf_headers):
    client = AsyncClient(flan_t5_xxl_url, hf_headers)
    with pytest.raises(ValidationError):
        await client.generate("test", max_new_tokens=10_000)


@pytest.mark.asyncio
async def test_generate_stream_async(flan_t5_xxl_url, hf_headers):
    client = AsyncClient(flan_t5_xxl_url, hf_headers)
    responses = [
        response async for response in client.generate_stream("test", max_new_tokens=1)
    ]

    assert len(responses) == 1
    response = responses[0]

    assert response.generated_text == ""
    assert response.details.finish_reason == FinishReason.Length
    assert response.details.generated_tokens == 1
    assert response.details.seed is None


@pytest.mark.asyncio
async def test_generate_stream_async_not_found(fake_url, hf_headers):
    client = AsyncClient(fake_url, hf_headers)
    with pytest.raises(NotFoundError):
        async for _ in client.generate_stream("test"):
            pass


@pytest.mark.asyncio
async def test_generate_stream_async_validation_error(flan_t5_xxl_url, hf_headers):
    client = AsyncClient(flan_t5_xxl_url, hf_headers)
    with pytest.raises(ValidationError):
        async for _ in client.generate_stream("test", max_new_tokens=10_000):
            pass
0
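The validation tests above rely on the server rejecting out-of-range parameters such as max_new_tokens=10_000 and surfacing that as a client-side exception. A short sketch of the same error-handling pattern in application code, assuming a reachable endpoint (the URL is a placeholder):

from text_generation import Client
from text_generation.errors import ValidationError

client = Client("http://127.0.0.1:8080")  # hypothetical endpoint
try:
    client.generate("test", max_new_tokens=10_000)
except ValidationError as exc:
    # Raised when the server's request validation rejects the parameters.
    print(f"request rejected: {exc}")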
hf_public_repos/text-generation-inference/clients/python
hf_public_repos/text-generation-inference/clients/python/tests/test_errors.py
from text_generation.errors import (
    parse_error,
    GenerationError,
    IncompleteGenerationError,
    OverloadedError,
    ValidationError,
    BadRequestError,
    ShardNotReadyError,
    ShardTimeoutError,
    NotFoundError,
    RateLimitExceededError,
    UnknownError,
)


def test_generation_error():
    payload = {"error_type": "generation", "error": "test"}
    assert isinstance(parse_error(400, payload), GenerationError)


def test_incomplete_generation_error():
    payload = {"error_type": "incomplete_generation", "error": "test"}
    assert isinstance(parse_error(400, payload), IncompleteGenerationError)


def test_overloaded_error():
    payload = {"error_type": "overloaded", "error": "test"}
    assert isinstance(parse_error(400, payload), OverloadedError)


def test_validation_error():
    payload = {"error_type": "validation", "error": "test"}
    assert isinstance(parse_error(400, payload), ValidationError)


def test_bad_request_error():
    payload = {"error": "test"}
    assert isinstance(parse_error(400, payload), BadRequestError)


def test_shard_not_ready_error():
    payload = {"error": "test"}
    assert isinstance(parse_error(403, payload), ShardNotReadyError)
    assert isinstance(parse_error(424, payload), ShardNotReadyError)


def test_shard_timeout_error():
    payload = {"error": "test"}
    assert isinstance(parse_error(504, payload), ShardTimeoutError)


def test_not_found_error():
    payload = {"error": "test"}
    assert isinstance(parse_error(404, payload), NotFoundError)


def test_rate_limit_exceeded_error():
    payload = {"error": "test"}
    assert isinstance(parse_error(429, payload), RateLimitExceededError)


def test_unknown_error():
    payload = {"error": "test"}
    assert isinstance(parse_error(500, payload), UnknownError)
0
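The tests above fully pin down the dispatch that parse_error must implement: an explicit error_type in the payload takes precedence, and otherwise the HTTP status code decides. A minimal self-contained sketch of that logic; the stand-in exception classes below only mirror the names the tests import, and the real definitions live in text_generation/errors.py:

from typing import Dict


# Stand-in exception types mirroring the names imported in the tests above.
class GenerationError(Exception): ...
class IncompleteGenerationError(Exception): ...
class OverloadedError(Exception): ...
class ValidationError(Exception): ...
class BadRequestError(Exception): ...
class ShardNotReadyError(Exception): ...
class ShardTimeoutError(Exception): ...
class NotFoundError(Exception): ...
class RateLimitExceededError(Exception): ...
class UnknownError(Exception): ...


def parse_error(status_code: int, payload: Dict[str, str]) -> Exception:
    message = payload.get("error", "")

    # An explicit error_type from the server wins over the status code.
    by_type = {
        "generation": GenerationError,
        "incomplete_generation": IncompleteGenerationError,
        "overloaded": OverloadedError,
        "validation": ValidationError,
    }
    error_type = payload.get("error_type")
    if error_type in by_type:
        return by_type[error_type](message)

    # Otherwise fall back to the HTTP status code, as the tests assert.
    if status_code == 400:
        return BadRequestError(message)
    if status_code in (403, 424):
        return ShardNotReadyError(message)
    if status_code == 504:
        return ShardTimeoutError(message)
    if status_code == 404:
        return NotFoundError(message)
    if status_code == 429:
        return RateLimitExceededError(message)
    return UnknownError(message)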
hf_public_repos/text-generation-inference/clients/python
hf_public_repos/text-generation-inference/clients/python/tests/test_inference_api.py
import pytest

from text_generation import (
    InferenceAPIClient,
    InferenceAPIAsyncClient,
    Client,
    AsyncClient,
)
from text_generation.errors import NotSupportedError, NotFoundError
from text_generation.inference_api import check_model_support, deployed_models


def test_check_model_support(flan_t5_xxl, unsupported_model, fake_model):
    assert check_model_support(flan_t5_xxl)
    assert not check_model_support(unsupported_model)

    with pytest.raises(NotFoundError):
        check_model_support(fake_model)


def test_deployed_models():
    deployed_models()


def test_client(flan_t5_xxl):
    client = InferenceAPIClient(flan_t5_xxl)
    assert isinstance(client, Client)


def test_client_unsupported_model(unsupported_model):
    with pytest.raises(NotSupportedError):
        InferenceAPIClient(unsupported_model)


def test_async_client(flan_t5_xxl):
    client = InferenceAPIAsyncClient(flan_t5_xxl)
    assert isinstance(client, AsyncClient)


def test_async_client_unsupported_model(unsupported_model):
    with pytest.raises(NotSupportedError):
        InferenceAPIAsyncClient(unsupported_model)
0
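A brief usage sketch matching the behavior these tests assert: InferenceAPIClient checks model support at construction time and behaves as a regular Client pointed at the hosted Inference API. The prompt and token budget below are illustrative only:

from text_generation import InferenceAPIClient
from text_generation.errors import NotSupportedError

try:
    client = InferenceAPIClient("google/flan-t5-xxl")
    print(client.generate("test", max_new_tokens=1).generated_text)
except NotSupportedError:
    # Raised for models such as "gpt2" that text-generation-inference does not serve.
    print("model not supported")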
hf_public_repos/text-generation-inference/clients/python
hf_public_repos/text-generation-inference/clients/python/tests/test_types.py
import pytest

from text_generation.types import Parameters, Request
from text_generation.errors import ValidationError


def test_parameters_validation():
    # Test best_of
    Parameters(best_of=1)
    with pytest.raises(ValidationError):
        Parameters(best_of=0)
    with pytest.raises(ValidationError):
        Parameters(best_of=-1)
    Parameters(best_of=2, do_sample=True)
    with pytest.raises(ValidationError):
        Parameters(best_of=2)
    with pytest.raises(ValidationError):
        Parameters(best_of=2, seed=1)

    # Test repetition_penalty
    Parameters(repetition_penalty=1)
    with pytest.raises(ValidationError):
        Parameters(repetition_penalty=0)
    with pytest.raises(ValidationError):
        Parameters(repetition_penalty=-1)

    # Test seed
    Parameters(seed=1)
    with pytest.raises(ValidationError):
        Parameters(seed=-1)

    # Test temperature
    Parameters(temperature=1)
    with pytest.raises(ValidationError):
        Parameters(temperature=0)
    with pytest.raises(ValidationError):
        Parameters(temperature=-1)

    # Test top_k
    Parameters(top_k=1)
    with pytest.raises(ValidationError):
        Parameters(top_k=0)
    with pytest.raises(ValidationError):
        Parameters(top_k=-1)

    # Test top_p
    Parameters(top_p=0.5)
    with pytest.raises(ValidationError):
        Parameters(top_p=0)
    with pytest.raises(ValidationError):
        Parameters(top_p=-1)
    with pytest.raises(ValidationError):
        Parameters(top_p=1)

    # Test truncate
    Parameters(truncate=1)
    with pytest.raises(ValidationError):
        Parameters(truncate=0)
    with pytest.raises(ValidationError):
        Parameters(truncate=-1)

    # Test typical_p
    Parameters(typical_p=0.5)
    with pytest.raises(ValidationError):
        Parameters(typical_p=0)
    with pytest.raises(ValidationError):
        Parameters(typical_p=-1)
    with pytest.raises(ValidationError):
        Parameters(typical_p=1)


def test_request_validation():
    Request(inputs="test")

    with pytest.raises(ValidationError):
        Request(inputs="")

    Request(inputs="test", stream=True)
    Request(inputs="test", parameters=Parameters(best_of=2, do_sample=True))

    with pytest.raises(ValidationError):
        Request(
            inputs="test", parameters=Parameters(best_of=2, do_sample=True), stream=True
        )
0
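The constraints exercised above suggest how text_generation/types.py enforces them. One way to express a subset with pydantic v1 validators, a sketch under the pydantic ^1.10 assumption from the pyproject and not the actual implementation; note the library surfaces its own ValidationError type, while this sketch raises plain ValueError, which pydantic wraps into pydantic.ValidationError:

from typing import Optional

from pydantic import BaseModel, validator


class Parameters(BaseModel):
    # Field order matters: a validator only sees previously declared fields in `values`.
    do_sample: bool = False
    seed: Optional[int] = None
    best_of: Optional[int] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None

    @validator("seed")
    def valid_seed(cls, v):
        if v is not None and v < 0:
            raise ValueError("`seed` must be positive")
        return v

    @validator("best_of")
    def valid_best_of(cls, v, values):
        if v is not None:
            if v <= 0:
                raise ValueError("`best_of` must be strictly positive")
            if v > 1 and not values.get("do_sample"):
                raise ValueError("`do_sample` must be True when `best_of` > 1")
            if v > 1 and values.get("seed") is not None:
                raise ValueError("`seed` must not be set when `best_of` > 1")
        return v

    @validator("temperature")
    def valid_temperature(cls, v):
        if v is not None and v <= 0:
            raise ValueError("`temperature` must be strictly positive")
        return v

    @validator("top_p")
    def valid_top_p(cls, v):
        if v is not None and not 0 < v < 1:
            raise ValueError("`top_p` must be > 0.0 and < 1.0")
        return v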
hf_public_repos/text-generation-inference/clients/python
hf_public_repos/text-generation-inference/clients/python/text_generation/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__version__ = "0.6.0"

from text_generation.client import Client, AsyncClient
from text_generation.inference_api import InferenceAPIClient, InferenceAPIAsyncClient
0
hf_public_repos/text-generation-inference/clients/python
hf_public_repos/text-generation-inference/clients/python/text_generation/client.py
import json import requests from aiohttp import ClientSession, ClientTimeout from pydantic import ValidationError from typing import Dict, Optional, List, AsyncIterator, Iterator from text_generation.types import ( StreamResponse, Response, Request, Parameters, ) from text_generation.errors import parse_error class Client: """Client to make calls to a text-generation-inference instance Example: ```python >>> from text_generation import Client >>> client = Client("https://api-inference.huggingface.co/models/bigscience/bloomz") >>> client.generate("Why is the sky blue?").generated_text ' Rayleigh scattering' >>> result = "" >>> for response in client.generate_stream("Why is the sky blue?"): >>> if not response.token.special: >>> result += response.token.text >>> result ' Rayleigh scattering' ``` """ def __init__( self, base_url: str, headers: Optional[Dict[str, str]] = None, cookies: Optional[Dict[str, str]] = None, timeout: int = 10, ): """ Args: base_url (`str`): text-generation-inference instance base url headers (`Optional[Dict[str, str]]`): Additional headers cookies (`Optional[Dict[str, str]]`): Cookies to include in the requests timeout (`int`): Timeout in seconds """ self.base_url = base_url self.headers = headers self.cookies = cookies self.timeout = timeout def generate( self, prompt: str, do_sample: bool = False, max_new_tokens: int = 20, best_of: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, stop_sequences: Optional[List[str]] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: bool = False, decoder_input_details: bool = False, ) -> Response: """ Given a prompt, generate the following text Args: prompt (`str`): Input text do_sample (`bool`): Activate logits sampling max_new_tokens (`int`): Maximum number of generated tokens best_of (`int`): Generate best_of sequences and return the one if the highest token logprobs repetition_penalty (`float`): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. return_full_text (`bool`): Whether to prepend the prompt to the generated text seed (`int`): Random sampling seed stop_sequences (`List[str]`): Stop generating tokens if a member of `stop_sequences` is generated temperature (`float`): The value used to module the logits distribution. top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. 
truncate (`int`): Truncate inputs tokens to the given size typical_p (`float`): Typical Decoding mass See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information watermark (`bool`): Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) decoder_input_details (`bool`): Return the decoder input token logprobs and ids Returns: Response: generated response """ # Validate parameters parameters = Parameters( best_of=best_of, details=True, do_sample=do_sample, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, return_full_text=return_full_text, seed=seed, stop=stop_sequences if stop_sequences is not None else [], temperature=temperature, top_k=top_k, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark, decoder_input_details=decoder_input_details, ) request = Request(inputs=prompt, stream=False, parameters=parameters) resp = requests.post( self.base_url, json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout, ) payload = resp.json() if resp.status_code != 200: raise parse_error(resp.status_code, payload) return Response(**payload[0]) def generate_stream( self, prompt: str, do_sample: bool = False, max_new_tokens: int = 20, repetition_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, stop_sequences: Optional[List[str]] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: bool = False, ) -> Iterator[StreamResponse]: """ Given a prompt, generate the following stream of tokens Args: prompt (`str`): Input text do_sample (`bool`): Activate logits sampling max_new_tokens (`int`): Maximum number of generated tokens repetition_penalty (`float`): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. return_full_text (`bool`): Whether to prepend the prompt to the generated text seed (`int`): Random sampling seed stop_sequences (`List[str]`): Stop generating tokens if a member of `stop_sequences` is generated temperature (`float`): The value used to module the logits distribution. top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. 
truncate (`int`): Truncate inputs tokens to the given size typical_p (`float`): Typical Decoding mass See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information watermark (`bool`): Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) Returns: Iterator[StreamResponse]: stream of generated tokens """ # Validate parameters parameters = Parameters( best_of=None, details=True, decoder_input_details=False, do_sample=do_sample, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, return_full_text=return_full_text, seed=seed, stop=stop_sequences if stop_sequences is not None else [], temperature=temperature, top_k=top_k, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark, ) request = Request(inputs=prompt, stream=True, parameters=parameters) resp = requests.post( self.base_url, json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout, stream=True, ) if resp.status_code != 200: raise parse_error(resp.status_code, resp.json()) # Parse ServerSentEvents for byte_payload in resp.iter_lines(): # Skip line if byte_payload == b"\n": continue payload = byte_payload.decode("utf-8") # Event data if payload.startswith("data:"): # Decode payload json_payload = json.loads(payload.lstrip("data:").rstrip("/n")) # Parse payload try: response = StreamResponse(**json_payload) except ValidationError: # If we failed to parse the payload, then it is an error payload raise parse_error(resp.status_code, json_payload) yield response class AsyncClient: """Asynchronous Client to make calls to a text-generation-inference instance Example: ```python >>> from text_generation import AsyncClient >>> client = AsyncClient("https://api-inference.huggingface.co/models/bigscience/bloomz") >>> response = await client.generate("Why is the sky blue?") >>> response.generated_text ' Rayleigh scattering' >>> result = "" >>> async for response in client.generate_stream("Why is the sky blue?"): >>> if not response.token.special: >>> result += response.token.text >>> result ' Rayleigh scattering' ``` """ def __init__( self, base_url: str, headers: Optional[Dict[str, str]] = None, cookies: Optional[Dict[str, str]] = None, timeout: int = 10, ): """ Args: base_url (`str`): text-generation-inference instance base url headers (`Optional[Dict[str, str]]`): Additional headers cookies (`Optional[Dict[str, str]]`): Cookies to include in the requests timeout (`int`): Timeout in seconds """ self.base_url = base_url self.headers = headers self.cookies = cookies self.timeout = ClientTimeout(timeout * 60) async def generate( self, prompt: str, do_sample: bool = False, max_new_tokens: int = 20, best_of: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, stop_sequences: Optional[List[str]] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: bool = False, decoder_input_details: bool = False, ) -> Response: """ Given a prompt, generate the following text asynchronously Args: prompt (`str`): Input text do_sample (`bool`): Activate logits sampling max_new_tokens (`int`): Maximum number of generated tokens best_of (`int`): Generate best_of sequences and return the one if the highest token logprobs repetition_penalty (`float`): The parameter for repetition penalty. 1.0 means no penalty. 
                See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
            return_full_text (`bool`):
                Whether to prepend the prompt to the generated text
            seed (`int`):
                Random sampling seed
            stop_sequences (`List[str]`):
                Stop generating tokens if a member of `stop_sequences` is generated
            temperature (`float`):
                The value used to modulate the logits distribution.
            top_k (`int`):
                The number of highest probability vocabulary tokens to keep for top-k-filtering.
            top_p (`float`):
                If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
                higher are kept for generation.
            truncate (`int`):
                Truncate input tokens to the given size
            typical_p (`float`):
                Typical Decoding mass
                See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
            watermark (`bool`):
                Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
            decoder_input_details (`bool`):
                Return the decoder input token logprobs and ids

        Returns:
            Response: generated response
        """
        # Validate parameters
        parameters = Parameters(
            best_of=best_of,
            details=True,
            decoder_input_details=decoder_input_details,
            do_sample=do_sample,
            max_new_tokens=max_new_tokens,
            repetition_penalty=repetition_penalty,
            return_full_text=return_full_text,
            seed=seed,
            stop=stop_sequences if stop_sequences is not None else [],
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            truncate=truncate,
            typical_p=typical_p,
            watermark=watermark,
        )
        request = Request(inputs=prompt, stream=False, parameters=parameters)

        async with ClientSession(
            headers=self.headers, cookies=self.cookies, timeout=self.timeout
        ) as session:
            async with session.post(self.base_url, json=request.dict()) as resp:
                payload = await resp.json()

                if resp.status != 200:
                    raise parse_error(resp.status, payload)
                return Response(**payload[0])

    async def generate_stream(
        self,
        prompt: str,
        do_sample: bool = False,
        max_new_tokens: int = 20,
        repetition_penalty: Optional[float] = None,
        return_full_text: bool = False,
        seed: Optional[int] = None,
        stop_sequences: Optional[List[str]] = None,
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        truncate: Optional[int] = None,
        typical_p: Optional[float] = None,
        watermark: bool = False,
    ) -> AsyncIterator[StreamResponse]:
        """
        Given a prompt, generate the following stream of tokens asynchronously

        Args:
            prompt (`str`):
                Input text
            do_sample (`bool`):
                Activate logits sampling
            max_new_tokens (`int`):
                Maximum number of generated tokens
            repetition_penalty (`float`):
                The parameter for repetition penalty. 1.0 means no penalty.
                See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
            return_full_text (`bool`):
                Whether to prepend the prompt to the generated text
            seed (`int`):
                Random sampling seed
            stop_sequences (`List[str]`):
                Stop generating tokens if a member of `stop_sequences` is generated
            temperature (`float`):
                The value used to modulate the logits distribution.
            top_k (`int`):
                The number of highest probability vocabulary tokens to keep for top-k-filtering.
            top_p (`float`):
                If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
                higher are kept for generation.
            truncate (`int`):
                Truncate input tokens to the given size
            typical_p (`float`):
                Typical Decoding mass
                See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
            watermark (`bool`):
                Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)

        Returns:
            AsyncIterator[StreamResponse]: stream of generated tokens
        """
        # Validate parameters
        parameters = Parameters(
            best_of=None,
            details=True,
            decoder_input_details=False,
            do_sample=do_sample,
            max_new_tokens=max_new_tokens,
            repetition_penalty=repetition_penalty,
            return_full_text=return_full_text,
            seed=seed,
            stop=stop_sequences if stop_sequences is not None else [],
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            truncate=truncate,
            typical_p=typical_p,
            watermark=watermark,
        )
        request = Request(inputs=prompt, stream=True, parameters=parameters)

        async with ClientSession(
            headers=self.headers, cookies=self.cookies, timeout=self.timeout
        ) as session:
            async with session.post(self.base_url, json=request.dict()) as resp:
                if resp.status != 200:
                    raise parse_error(resp.status, await resp.json())

                # Parse ServerSentEvents
                async for byte_payload in resp.content:
                    # Skip line
                    if byte_payload == b"\n":
                        continue

                    payload = byte_payload.decode("utf-8")

                    # Event data
                    if payload.startswith("data:"):
                        # Decode payload
                        json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
                        # Parse payload
                        try:
                            response = StreamResponse(**json_payload)
                        except ValidationError:
                            # If we failed to parse the payload, then it is an error payload
                            raise parse_error(resp.status, json_payload)
                        yield response
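For reference, a minimal end-to-end sketch of the two clients defined above. It assumes a TGI server is already listening on `http://127.0.0.1:8080`; the base URL is a placeholder, not part of the library:

```python
import asyncio

from text_generation import Client, AsyncClient

# Placeholder endpoint; replace with your own TGI instance.
BASE_URL = "http://127.0.0.1:8080"

# Blocking client: one-shot generation, then token-by-token streaming.
client = Client(BASE_URL, timeout=30)
print(client.generate("What is Deep Learning?", max_new_tokens=20).generated_text)

text = ""
for stream_response in client.generate_stream("What is Deep Learning?", max_new_tokens=20):
    if not stream_response.token.special:
        text += stream_response.token.text
print(text)


# Asynchronous client: same calls, awaited.
async def main():
    async_client = AsyncClient(BASE_URL)
    response = await async_client.generate("What is Deep Learning?", max_new_tokens=20)
    print(response.generated_text)


asyncio.run(main())
```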
0
hf_public_repos/text-generation-inference/clients/python
hf_public_repos/text-generation-inference/clients/python/text_generation/errors.py
from typing import Dict # Text Generation Inference Errors class ValidationError(Exception): def __init__(self, message: str): super().__init__(message) class GenerationError(Exception): def __init__(self, message: str): super().__init__(message) class OverloadedError(Exception): def __init__(self, message: str): super().__init__(message) class IncompleteGenerationError(Exception): def __init__(self, message: str): super().__init__(message) # API Inference Errors class BadRequestError(Exception): def __init__(self, message: str): super().__init__(message) class ShardNotReadyError(Exception): def __init__(self, message: str): super().__init__(message) class ShardTimeoutError(Exception): def __init__(self, message: str): super().__init__(message) class NotFoundError(Exception): def __init__(self, message: str): super().__init__(message) class RateLimitExceededError(Exception): def __init__(self, message: str): super().__init__(message) class NotSupportedError(Exception): def __init__(self, model_id: str): message = ( f"Model `{model_id}` is not available for inference with this client. \n" "Use `huggingface_hub.inference_api.InferenceApi` instead." ) super(NotSupportedError, self).__init__(message) # Unknown error class UnknownError(Exception): def __init__(self, message: str): super().__init__(message) def parse_error(status_code: int, payload: Dict[str, str]) -> Exception: """ Parse error given an HTTP status code and a json payload Args: status_code (`int`): HTTP status code payload (`Dict[str, str]`): Json payload Returns: Exception: parsed exception """ # Try to parse a Text Generation Inference error message = payload["error"] if "error_type" in payload: error_type = payload["error_type"] if error_type == "generation": return GenerationError(message) if error_type == "incomplete_generation": return IncompleteGenerationError(message) if error_type == "overloaded": return OverloadedError(message) if error_type == "validation": return ValidationError(message) # Try to parse a APIInference error if status_code == 400: return BadRequestError(message) if status_code == 403 or status_code == 424: return ShardNotReadyError(message) if status_code == 504: return ShardTimeoutError(message) if status_code == 404: return NotFoundError(message) if status_code == 429: return RateLimitExceededError(message) # Fallback to an unknown error return UnknownError(message)
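As a quick illustration of the dispatch implemented by `parse_error` above, here is a small sketch; the payloads are made-up examples, not real server responses:

```python
from text_generation.errors import (
    GenerationError,
    RateLimitExceededError,
    UnknownError,
    parse_error,
)

# A Text Generation Inference error: dispatched on `error_type`.
err = parse_error(500, {"error": "Request failed during generation", "error_type": "generation"})
assert isinstance(err, GenerationError)

# An API Inference error: dispatched on the HTTP status code.
err = parse_error(429, {"error": "Rate limit reached"})
assert isinstance(err, RateLimitExceededError)

# Anything else falls back to UnknownError.
err = parse_error(418, {"error": "I'm a teapot"})
assert isinstance(err, UnknownError)
```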
0
hf_public_repos/text-generation-inference/clients/python
hf_public_repos/text-generation-inference/clients/python/text_generation/inference_api.py
import os
import requests

from typing import Dict, Optional, List
from huggingface_hub.utils import build_hf_headers

from text_generation import Client, AsyncClient, __version__
from text_generation.types import DeployedModel
from text_generation.errors import NotSupportedError, parse_error

INFERENCE_ENDPOINT = os.environ.get(
    "HF_INFERENCE_ENDPOINT", "https://api-inference.huggingface.co"
)


def deployed_models(headers: Optional[Dict] = None) -> List[DeployedModel]:
    """
    Get all currently deployed models with text-generation-inference support

    Returns:
        List[DeployedModel]: list of all currently deployed models
    """
    resp = requests.get(
        f"{INFERENCE_ENDPOINT}/framework/text-generation-inference",
        headers=headers,
        timeout=5,
    )

    payload = resp.json()
    if resp.status_code != 200:
        raise parse_error(resp.status_code, payload)

    models = [DeployedModel(**raw_deployed_model) for raw_deployed_model in payload]
    return models


def check_model_support(repo_id: str, headers: Optional[Dict] = None) -> bool:
    """
    Check if a given model is supported by text-generation-inference

    Returns:
        bool: whether the model is supported by this client
    """
    resp = requests.get(
        f"{INFERENCE_ENDPOINT}/status/{repo_id}",
        headers=headers,
        timeout=5,
    )

    payload = resp.json()
    if resp.status_code != 200:
        raise parse_error(resp.status_code, payload)

    framework = payload["framework"]
    supported = framework == "text-generation-inference"
    return supported


class InferenceAPIClient(Client):
    """Client to make calls to the HuggingFace Inference API.

     Only supports a subset of the available text-generation or text2text-generation models that are served using
     text-generation-inference

     Example:

     ```python
     >>> from text_generation import InferenceAPIClient

     >>> client = InferenceAPIClient("bigscience/bloomz")
     >>> client.generate("Why is the sky blue?").generated_text
     ' Rayleigh scattering'

     >>> result = ""
     >>> for response in client.generate_stream("Why is the sky blue?"):
     >>>     if not response.token.special:
     >>>         result += response.token.text
     >>> result
     ' Rayleigh scattering'
     ```
    """

    def __init__(self, repo_id: str, token: Optional[str] = None, timeout: int = 10):
        """
        Init headers and API information

        Args:
            repo_id (`str`):
                Id of repository (e.g. `bigscience/bloom`).
            token (`str`, `optional`):
                The API token to use as HTTP bearer authorization. This is not
                the authentication token. You can find the token in
                https://huggingface.co/settings/token. Alternatively, you can
                find both your organizations and personal API tokens using
                `HfApi().whoami(token)`.
            timeout (`int`):
                Timeout in seconds
        """
        headers = build_hf_headers(
            token=token, library_name="text-generation", library_version=__version__
        )

        # Text Generation Inference client only supports a subset of the available hub models
        if not check_model_support(repo_id, headers):
            raise NotSupportedError(repo_id)

        base_url = f"{INFERENCE_ENDPOINT}/models/{repo_id}"

        super(InferenceAPIClient, self).__init__(
            base_url, headers=headers, timeout=timeout
        )


class InferenceAPIAsyncClient(AsyncClient):
    """Asynchronous Client to make calls to the HuggingFace Inference API.
     Only supports a subset of the available text-generation or text2text-generation models that are served using
     text-generation-inference

     Example:

     ```python
     >>> from text_generation import InferenceAPIAsyncClient

     >>> client = InferenceAPIAsyncClient("bigscience/bloomz")
     >>> response = await client.generate("Why is the sky blue?")
     >>> response.generated_text
     ' Rayleigh scattering'

     >>> result = ""
     >>> async for response in client.generate_stream("Why is the sky blue?"):
     >>>     if not response.token.special:
     >>>         result += response.token.text
     >>> result
     ' Rayleigh scattering'
     ```
    """

    def __init__(self, repo_id: str, token: Optional[str] = None, timeout: int = 10):
        """
        Init headers and API information

        Args:
            repo_id (`str`):
                Id of repository (e.g. `bigscience/bloom`).
            token (`str`, `optional`):
                The API token to use as HTTP bearer authorization. This is not
                the authentication token. You can find the token in
                https://huggingface.co/settings/token. Alternatively, you can
                find both your organizations and personal API tokens using
                `HfApi().whoami(token)`.
            timeout (`int`):
                Timeout in seconds
        """
        headers = build_hf_headers(
            token=token, library_name="text-generation", library_version=__version__
        )

        # Text Generation Inference client only supports a subset of the available hub models
        if not check_model_support(repo_id, headers):
            raise NotSupportedError(repo_id)

        base_url = f"{INFERENCE_ENDPOINT}/models/{repo_id}"

        super(InferenceAPIAsyncClient, self).__init__(
            base_url, headers=headers, timeout=timeout
        )
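A short usage sketch for the Inference API helpers above. Whether a given repository is accepted depends on what the hosted API currently serves, so treat the model id as an example only; the calls perform real network requests:

```python
from text_generation import InferenceAPIClient
from text_generation.errors import NotSupportedError
from text_generation.inference_api import deployed_models

# List models currently served with text-generation-inference.
for model in deployed_models():
    print(model.model_id, model.sha)

try:
    client = InferenceAPIClient("bigscience/bloomz")
    print(client.generate("Why is the sky blue?").generated_text)
except NotSupportedError:
    # Raised when the repository is not served by text-generation-inference.
    print("Model not supported by this client")
```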
0
hf_public_repos/text-generation-inference/clients/python
hf_public_repos/text-generation-inference/clients/python/text_generation/types.py
from enum import Enum
from pydantic import BaseModel, validator
from typing import Optional, List

from text_generation.errors import ValidationError


class Parameters(BaseModel):
    # Activate logits sampling
    do_sample: bool = False
    # Maximum number of generated tokens
    max_new_tokens: int = 20
    # The parameter for repetition penalty. 1.0 means no penalty.
    # See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
    repetition_penalty: Optional[float] = None
    # Whether to prepend the prompt to the generated text
    return_full_text: bool = False
    # Stop generating tokens if a member of `stop_sequences` is generated
    stop: List[str] = []
    # Random sampling seed
    seed: Optional[int]
    # The value used to modulate the logits distribution.
    temperature: Optional[float]
    # The number of highest probability vocabulary tokens to keep for top-k-filtering.
    top_k: Optional[int]
    # If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
    # higher are kept for generation.
    top_p: Optional[float]
    # truncate input tokens to the given size
    truncate: Optional[int]
    # Typical Decoding mass
    # See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
    typical_p: Optional[float]
    # Generate best_of sequences and return the one with the highest token logprobs
    best_of: Optional[int]
    # Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
    watermark: bool = False
    # Get generation details
    details: bool = False
    # Get decoder input token logprobs and ids
    decoder_input_details: bool = False

    @validator("best_of")
    def valid_best_of(cls, field_value, values):
        if field_value is not None:
            if field_value <= 0:
                raise ValidationError("`best_of` must be strictly positive")
            if field_value > 1 and values["seed"] is not None:
                raise ValidationError("`seed` must not be set when `best_of` is > 1")
            sampling = (
                values["do_sample"]
                | (values["temperature"] is not None)
                | (values["top_k"] is not None)
                | (values["top_p"] is not None)
                | (values["typical_p"] is not None)
            )
            if field_value > 1 and not sampling:
                raise ValidationError("you must use sampling when `best_of` is > 1")

        return field_value

    @validator("repetition_penalty")
    def valid_repetition_penalty(cls, v):
        if v is not None and v <= 0:
            raise ValidationError("`repetition_penalty` must be strictly positive")
        return v

    @validator("seed")
    def valid_seed(cls, v):
        if v is not None and v < 0:
            raise ValidationError("`seed` must be positive")
        return v

    @validator("temperature")
    def valid_temp(cls, v):
        if v is not None and v <= 0:
            raise ValidationError("`temperature` must be strictly positive")
        return v

    @validator("top_k")
    def valid_top_k(cls, v):
        if v is not None and v <= 0:
            raise ValidationError("`top_k` must be strictly positive")
        return v

    @validator("top_p")
    def valid_top_p(cls, v):
        if v is not None and (v <= 0 or v >= 1.0):
            raise ValidationError("`top_p` must be > 0.0 and < 1.0")
        return v

    @validator("truncate")
    def valid_truncate(cls, v):
        if v is not None and v <= 0:
            raise ValidationError("`truncate` must be strictly positive")
        return v

    @validator("typical_p")
    def valid_typical_p(cls, v):
        if v is not None and (v <= 0 or v >= 1.0):
            raise ValidationError("`typical_p` must be > 0.0 and < 1.0")
        return v


class Request(BaseModel):
    # Prompt
    inputs: str
    # Generation parameters
    parameters: Optional[Parameters]
    # Whether to stream output tokens
    stream: bool = False

    @validator("inputs")
    def valid_input(cls, v):
        if not v:
            raise ValidationError("`inputs` cannot be empty")
        return v

    @validator("stream")
    def valid_best_of_stream(cls, field_value, values):
        parameters = values["parameters"]
        if (
            parameters is not None
            and parameters.best_of is not None
            and parameters.best_of > 1
            and field_value
        ):
            raise ValidationError(
                "`best_of` != 1 is not supported when `stream` == True"
            )
        return field_value


# Decoder input tokens
class InputToken(BaseModel):
    # Token ID from the model tokenizer
    id: int
    # Token text
    text: str
    # Logprob
    # Optional since the logprob of the first token cannot be computed
    logprob: Optional[float]


# Generated tokens
class Token(BaseModel):
    # Token ID from the model tokenizer
    id: int
    # Token text
    text: str
    # Logprob
    logprob: float
    # Is the token a special token
    # Can be used to ignore tokens when concatenating
    special: bool


# Generation finish reason
class FinishReason(str, Enum):
    # number of generated tokens == `max_new_tokens`
    Length = "length"
    # the model generated its end of sequence token
    EndOfSequenceToken = "eos_token"
    # the model generated a text included in `stop_sequences`
    StopSequence = "stop_sequence"


# Additional sequences when using the `best_of` parameter
class BestOfSequence(BaseModel):
    # Generated text
    generated_text: str
    # Generation finish reason
    finish_reason: FinishReason
    # Number of generated tokens
    generated_tokens: int
    # Sampling seed if sampling was activated
    seed: Optional[int]
    # Decoder input tokens, empty if decoder_input_details is False
    prefill: List[InputToken]
    # Generated tokens
    tokens: List[Token]


# `generate` details
class Details(BaseModel):
    # Generation finish reason
    finish_reason: FinishReason
    # Number of generated tokens
    generated_tokens: int
    # Sampling seed if sampling was activated
    seed: Optional[int]
    # Decoder input tokens, empty if decoder_input_details is False
    prefill: List[InputToken]
    # Generated tokens
    tokens: List[Token]
    # Additional sequences when using the `best_of` parameter
    best_of_sequences: Optional[List[BestOfSequence]]


# `generate` return value
class Response(BaseModel):
    # Generated text
    generated_text: str
    # Generation details
    details: Details


# `generate_stream` details
class StreamDetails(BaseModel):
    # Generation finish reason
    finish_reason: FinishReason
    # Number of generated tokens
    generated_tokens: int
    # Sampling seed if sampling was activated
    seed: Optional[int]


# `generate_stream` return value
class StreamResponse(BaseModel):
    # Generated token
    token: Token
    # Complete generated text
    # Only available when the generation is finished
    generated_text: Optional[str]
    # Generation details
    # Only available when the generation is finished
    details: Optional[StreamDetails]


# Inference API currently deployed model
class DeployedModel(BaseModel):
    model_id: str
    sha: str
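To make the validators above concrete, a small sketch of how `Parameters` and `Request` reject inconsistent settings; the values are arbitrary examples:

```python
from text_generation.types import Parameters, Request
from text_generation.errors import ValidationError

# Valid: best_of > 1 together with sampling and no fixed seed.
params = Parameters(best_of=2, do_sample=True)

try:
    # Invalid: best_of > 1 without any sampling parameter set.
    Parameters(best_of=2)
except ValidationError as e:
    print(e)  # you must use sampling when `best_of` is > 1

try:
    # Invalid: streaming is not supported when best_of > 1.
    Request(inputs="Hello", parameters=params, stream=True)
except ValidationError as e:
    print(e)  # `best_of` != 1 is not supported when `stream` == True
```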
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/docs/index.html
<html> <head> <!-- Load the latest Swagger UI code and style from npm using unpkg.com --> <script src="https://unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js"></script> <link rel="stylesheet" type="text/css" href="https://unpkg.com/swagger-ui-dist@3/swagger-ui.css"/> <title>Text Generation Inference API</title> </head> <body> <div id="swagger-ui"></div> <!-- Div to hold the UI component --> <script> window.onload = function () { // Begin Swagger UI call region const ui = SwaggerUIBundle({ url: "openapi.json", //Location of Open API spec in the repo dom_id: '#swagger-ui', deepLinking: true, supportedSubmitMethods: [], presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], plugins: [ SwaggerUIBundle.plugins.DownloadUrl ], }) window.ui = ui } </script> </body> </html>
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/docs/openapi.json
{ "openapi": "3.0.3", "info": { "title": "Text Generation Inference", "description": "Text Generation Webserver", "contact": { "name": "Olivier Dehaene" }, "license": { "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0" }, "version": "1.0.0" }, "paths": { "/": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate tokens if `stream == false` or a stream of token if `stream == true`", "description": "Generate tokens if `stream == false` or a stream of token if `stream == true`", "operationId": "compat_generate", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CompatGenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateResponse" } }, "text/event-stream": { "schema": { "$ref": "#/components/schemas/StreamResponse" } } } }, "422": { "description": "Input validation error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/generate": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate tokens", "description": "Generate tokens", "operationId": "generate", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateResponse" } } } }, "422": { "description": "Input validation error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/generate_stream": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate a stream of token using Server-Sent Events", "description": "Generate a stream of token using Server-Sent Events", "operationId": "generate_stream", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/StreamResponse" } } 
} }, "422": { "description": "Input validation error", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/health": { "get": { "tags": [ "Text Generation Inference" ], "summary": "Health check method", "description": "Health check method", "operationId": "health", "responses": { "200": { "description": "Everything is working fine" }, "503": { "description": "Text generation inference is down", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "unhealthy", "error_type": "healthcheck" } } } } } } }, "/info": { "get": { "tags": [ "Text Generation Inference" ], "summary": "Text Generation Inference endpoint info", "description": "Text Generation Inference endpoint info", "operationId": "get_model_info", "responses": { "200": { "description": "Served model info", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/Info" } } } } } } }, "/metrics": { "get": { "tags": [ "Text Generation Inference" ], "summary": "Prometheus metrics scrape endpoint", "description": "Prometheus metrics scrape endpoint", "operationId": "metrics", "responses": { "200": { "description": "Prometheus Metrics", "content": { "text/plain": { "schema": { "type": "string" } } } } } } } }, "components": { "schemas": { "BestOfSequence": { "type": "object", "required": [ "generated_text", "finish_reason", "generated_tokens", "prefill", "tokens" ], "properties": { "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_text": { "type": "string", "example": "test" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0.0 }, "prefill": { "type": "array", "items": { "$ref": "#/components/schemas/PrefillToken" } }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0.0 }, "tokens": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } } } }, "CompatGenerateRequest": { "type": "object", "required": [ "inputs" ], "properties": { "inputs": { "type": "string", "example": "My name is Olivier and I" }, "parameters": { "$ref": "#/components/schemas/GenerateParameters" }, "stream": { "type": "boolean", "default": "false" } } }, "Details": { "type": "object", "required": [ "finish_reason", "generated_tokens", "prefill", "tokens" ], "properties": { "best_of_sequences": { "type": "array", "items": { "$ref": "#/components/schemas/BestOfSequence" }, "nullable": true }, "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0.0 }, "prefill": { "type": "array", "items": { "$ref": "#/components/schemas/PrefillToken" } }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0.0 }, "tokens": { "type": "array", "items": { 
"$ref": "#/components/schemas/Token" } } } }, "ErrorResponse": { "type": "object", "required": [ "error", "error_type" ], "properties": { "error": { "type": "string" }, "error_type": { "type": "string" } } }, "FinishReason": { "type": "string", "enum": [ "length", "eos_token", "stop_sequence" ] }, "GenerateParameters": { "type": "object", "properties": { "best_of": { "type": "integer", "default": "null", "example": 1, "nullable": true, "minimum": 0.0, "exclusiveMinimum": 0.0 }, "decoder_input_details": { "type": "boolean", "default": "true" }, "details": { "type": "boolean", "default": "true" }, "do_sample": { "type": "boolean", "default": "false", "example": true }, "max_new_tokens": { "type": "integer", "format": "int32", "default": "20", "minimum": 0.0, "exclusiveMaximum": 512.0, "exclusiveMinimum": 0.0 }, "repetition_penalty": { "type": "number", "format": "float", "default": "null", "example": 1.03, "nullable": true, "exclusiveMinimum": 0.0 }, "return_full_text": { "type": "boolean", "default": "null", "example": false, "nullable": true }, "seed": { "type": "integer", "format": "int64", "default": "null", "example": "null", "nullable": true, "minimum": 0.0, "exclusiveMinimum": 0.0 }, "stop": { "type": "array", "items": { "type": "string" }, "example": [ "photographer" ], "maxItems": 4 }, "temperature": { "type": "number", "format": "float", "default": "null", "example": 0.5, "nullable": true, "exclusiveMinimum": 0.0 }, "top_k": { "type": "integer", "format": "int32", "default": "null", "example": 10, "nullable": true, "exclusiveMinimum": 0.0 }, "top_p": { "type": "number", "format": "float", "default": "null", "example": 0.95, "nullable": true, "maximum": 1.0, "exclusiveMinimum": 0.0 }, "truncate": { "type": "integer", "default": "null", "example": "null", "nullable": true, "minimum": 0.0 }, "typical_p": { "type": "number", "format": "float", "default": "null", "example": 0.95, "nullable": true, "maximum": 1.0, "exclusiveMinimum": 0.0 }, "watermark": { "type": "boolean", "default": "false", "example": true } } }, "GenerateRequest": { "type": "object", "required": [ "inputs" ], "properties": { "inputs": { "type": "string", "example": "My name is Olivier and I" }, "parameters": { "$ref": "#/components/schemas/GenerateParameters" } } }, "GenerateResponse": { "type": "object", "required": [ "generated_text" ], "properties": { "details": { "allOf": [ { "$ref": "#/components/schemas/Details" } ], "nullable": true }, "generated_text": { "type": "string", "example": "test" } } }, "Info": { "type": "object", "required": [ "model_id", "model_dtype", "model_device_type", "max_concurrent_requests", "max_best_of", "max_stop_sequences", "max_input_length", "max_total_tokens", "waiting_served_ratio", "max_batch_total_tokens", "max_waiting_tokens", "validation_workers", "version" ], "properties": { "docker_label": { "type": "string", "example": "null", "nullable": true }, "max_batch_total_tokens": { "type": "integer", "format": "int32", "example": "32000", "minimum": 0.0 }, "max_best_of": { "type": "integer", "example": "2", "minimum": 0.0 }, "max_concurrent_requests": { "type": "integer", "description": "Router Parameters", "example": "128", "minimum": 0.0 }, "max_input_length": { "type": "integer", "example": "1024", "minimum": 0.0 }, "max_stop_sequences": { "type": "integer", "example": "4", "minimum": 0.0 }, "max_total_tokens": { "type": "integer", "example": "2048", "minimum": 0.0 }, "max_waiting_tokens": { "type": "integer", "example": "20", "minimum": 0.0 }, "model_device_type": { "type": 
"string", "example": "cuda" }, "model_dtype": { "type": "string", "example": "torch.float16" }, "model_id": { "type": "string", "description": "Model info", "example": "bigscience/blomm-560m" }, "model_pipeline_tag": { "type": "string", "example": "text-generation", "nullable": true }, "model_sha": { "type": "string", "example": "e985a63cdc139290c5f700ff1929f0b5942cced2", "nullable": true }, "sha": { "type": "string", "example": "null", "nullable": true }, "validation_workers": { "type": "integer", "example": "2", "minimum": 0.0 }, "version": { "type": "string", "description": "Router Info", "example": "0.5.0" }, "waiting_served_ratio": { "type": "number", "format": "float", "example": "1.2" } } }, "PrefillToken": { "type": "object", "required": [ "id", "text", "logprob" ], "properties": { "id": { "type": "integer", "format": "int32", "example": 0, "minimum": 0.0 }, "logprob": { "type": "number", "format": "float", "example": -0.34, "nullable": true }, "text": { "type": "string", "example": "test" } } }, "StreamDetails": { "type": "object", "required": [ "finish_reason", "generated_tokens" ], "properties": { "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0.0 }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0.0 } } }, "StreamResponse": { "type": "object", "required": [ "token" ], "properties": { "details": { "allOf": [ { "$ref": "#/components/schemas/StreamDetails" } ], "nullable": true }, "generated_text": { "type": "string", "default": "null", "example": "test", "nullable": true }, "token": { "$ref": "#/components/schemas/Token" } } }, "Token": { "type": "object", "required": [ "id", "text", "logprob", "special" ], "properties": { "id": { "type": "integer", "format": "int32", "example": 0, "minimum": 0.0 }, "logprob": { "type": "number", "format": "float", "example": -0.34, "nullable": true }, "special": { "type": "boolean", "example": "false" }, "text": { "type": "string", "example": "test" } } } } }, "tags": [ { "name": "Text Generation Inference", "description": "Hugging Face Text Generation Inference API" } ] }
0
hf_public_repos/text-generation-inference/docs
hf_public_repos/text-generation-inference/docs/source/_toctree.yml
- sections: - local: index title: Text Generation Inference - local: quicktour title: Quick Tour - local: installation title: Installation - local: supported_models title: Supported Models and Hardware title: Getting started - sections: - local: basic_tutorials/consuming_tgi title: Consuming TGI - local: basic_tutorials/preparing_model title: Preparing Model for Serving - local: basic_tutorials/gated_model_access title: Serving Private & Gated Models - local: basic_tutorials/using_cli title: Using TGI CLI title: Tutorials
0
hf_public_repos/text-generation-inference/docs
hf_public_repos/text-generation-inference/docs/source/index.md
# Text Generation Inference Text Generation Inference (TGI) is a toolkit for deploying and serving Large Language Models (LLMs). TGI enables high-performance text generation for the most popular open-source LLMs, including Llama, Falcon, StarCoder, BLOOM, GPT-NeoX, and T5. ![Text Generation Inference](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/TGI.png) Text Generation Inference implements many optimizations and features, such as: - Simple launcher to serve most popular LLMs - Production ready (distributed tracing with Open Telemetry, Prometheus metrics) - Tensor Parallelism for faster inference on multiple GPUs - Token streaming using Server-Sent Events (SSE) - Continuous batching of incoming requests for increased total throughput - Optimized transformers code for inference using [Flash Attention](https://github.com/HazyResearch/flash-attention) and [Paged Attention](https://github.com/vllm-project/vllm) on the most popular architectures - Quantization with [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) and [GPT-Q](https://arxiv.org/abs/2210.17323) - [Safetensors](https://github.com/huggingface/safetensors) weight loading - Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) - Logits warper (temperature scaling, top-p, top-k, repetition penalty) - Stop sequences - Log probabilities Text Generation Inference is used in production by multiple projects, such as: - [Hugging Chat](https://github.com/huggingface/chat-ui), an open-source interface for open-access models, such as Open Assistant and Llama - [OpenAssistant](https://open-assistant.io/), an open-source community effort to train LLMs in the open - [nat.dev](http://nat.dev/), a playground to explore and compare LLMs.
0
hf_public_repos/text-generation-inference/docs
hf_public_repos/text-generation-inference/docs/source/installation.md
# Installation

This section explains how to install the CLI tool as well as how to install TGI from source. **The strongly recommended approach is to use Docker, as it does not require much setup. Check [the Quick Tour](./quicktour) to learn how to run TGI with Docker.**

## Install CLI

You can use the TGI command-line interface (CLI) to download weights, serve and quantize models, or get information on serving parameters.

To install the CLI, you need to first clone the TGI repository and then run `make`.

```bash
git clone https://github.com/huggingface/text-generation-inference.git && cd text-generation-inference
make install
```

If you would like to serve models with custom kernels, run

```bash
BUILD_EXTENSIONS=True make install
```

## Local Installation from Source

Before you start, you will need to set up your environment and install Text Generation Inference. Text Generation Inference is tested on **Python 3.9+**.

Text Generation Inference is available on PyPI, conda and GitHub.

To install and launch locally, first [install Rust](https://rustup.rs/) and create a Python virtual environment with at least Python 3.9, e.g. using conda:

```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

conda create -n text-generation-inference python=3.9
conda activate text-generation-inference
```

You may also need to install Protoc.

On Linux:

```bash
PROTOC_ZIP=protoc-21.12-linux-x86_64.zip
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP
sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
rm -f $PROTOC_ZIP
```

On MacOS, using Homebrew:

```bash
brew install protobuf
```

Then run the following to install Text Generation Inference:

```bash
git clone https://github.com/huggingface/text-generation-inference.git && cd text-generation-inference
BUILD_EXTENSIONS=True make install
```

<Tip warning={true}>

On some machines, you may also need the OpenSSL libraries and gcc. On Linux machines, run:

```bash
sudo apt-get install libssl-dev gcc -y
```

</Tip>

Once installation is done, simply run:

```bash
make run-falcon-7b-instruct
```

This will serve the Falcon 7B Instruct model on port 8080, which you can then query.
0
hf_public_repos/text-generation-inference/docs
hf_public_repos/text-generation-inference/docs/source/quicktour.md
# Quick Tour

The easiest way of getting started is using the official Docker container. Install Docker following [their installation instructions](https://docs.docker.com/get-docker/).

Let's say you want to deploy the [Falcon-7B Instruct](https://huggingface.co/tiiuae/falcon-7b-instruct) model with TGI. Here is an example of how to do that:

```bash
model=tiiuae/falcon-7b-instruct
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run

docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.0.0 --model-id $model
```

<Tip warning={true}>

To use GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 11.8 or higher.

</Tip>

Once TGI is running, you can use the `generate` endpoint by making POST requests. To learn more about how to query the endpoints, check the [Consuming TGI](./basic_tutorials/consuming_tgi) section.

```bash
curl 127.0.0.1:8080/generate -X POST -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' -H 'Content-Type: application/json'
```

<Tip>

To see all possible flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more.

```bash
docker run ghcr.io/huggingface/text-generation-inference:1.0.0 --help
```

</Tip>
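If you prefer Python over curl, the same request can be sent with any HTTP library. A minimal sketch using `requests`, assuming the container above is running on port 8080:

```python
import requests

# Same request as the curl example above.
response = requests.post(
    "http://127.0.0.1:8080/generate",
    json={"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}},
    headers={"Content-Type": "application/json"},
)
print(response.json()["generated_text"])
```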
0
hf_public_repos/text-generation-inference/docs
hf_public_repos/text-generation-inference/docs/source/supported_models.md
# Supported Models and Hardware

Text Generation Inference enables serving optimized models on specific hardware for the highest performance. The following sections list which models and hardware are supported.

## Supported Models

The following models are optimized and can be served with TGI, which uses custom CUDA kernels for better inference. You can add the flag `--disable-custom-kernels` at the end of the `docker run` command if you wish to disable them.

- [BLOOM](https://huggingface.co/bigscience/bloom)
- [FLAN-T5](https://huggingface.co/google/flan-t5-xxl)
- [Galactica](https://huggingface.co/facebook/galactica-120b)
- [GPT-Neox](https://huggingface.co/EleutherAI/gpt-neox-20b)
- [Llama](https://github.com/facebookresearch/llama)
- [OPT](https://huggingface.co/facebook/opt-66b)
- [SantaCoder](https://huggingface.co/bigcode/santacoder)
- [Starcoder](https://huggingface.co/bigcode/starcoder)
- [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b)
- [Falcon 40B](https://huggingface.co/tiiuae/falcon-40b)
- [MPT](https://huggingface.co/mosaicml/mpt-30b)
- [Llama V2](https://huggingface.co/meta-llama)

If the above list lacks the model you would like to serve, depending on the model's pipeline type, you can try to initialize and serve the model anyway to see how well it performs, but performance isn't guaranteed for non-optimized models:

```python
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM

# for causal LMs/text-generation models
AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")
# or, for text-to-text generation models
AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")
```

## Supported Hardware

TGI optimized models are supported on NVIDIA [A100](https://www.nvidia.com/en-us/data-center/a100/), [A10G](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) and [T4](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPUs with CUDA 11.8+. Note that you have to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to use it. For other hardware, continuous batching will still apply, but some operations like flash attention and paged attention will not be executed.

TGI is also supported on the following AI hardware accelerators:
- *Habana first-gen Gaudi and Gaudi2:* check out this [example](https://github.com/huggingface/optimum-habana/tree/main/text-generation-inference) of how to serve models with TGI on Gaudi and Gaudi2 with [Optimum Habana](https://huggingface.co/docs/optimum/habana/index)
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md
# Consuming Text Generation Inference

There are many ways you can consume the Text Generation Inference server in your applications. After launching, you can use the `/generate` route and make a `POST` request to get results from the server. You can also use the `/generate_stream` route if you want TGI to return a stream of tokens. You can make the requests using the tool of your preference, such as curl, Python or TypeScript. For a final end-to-end experience, we also open-sourced ChatUI, a chat interface for open-source models.

## curl

After the launch, you can query the model using either the `/generate` or `/generate_stream` routes:

```bash
curl 127.0.0.1:8080/generate \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
    -H 'Content-Type: application/json'
```

## Inference Client

[`huggingface-hub`](https://huggingface.co/docs/huggingface_hub/main/en/index) is a Python library to interact with the Hugging Face Hub, including its endpoints. It provides a nice high-level class, [`~huggingface_hub.InferenceClient`], which makes it easy to make calls to a TGI endpoint. `InferenceClient` also takes care of parameter validation and provides a simple-to-use interface.

You can simply install the `huggingface-hub` package with pip.

```bash
pip install huggingface-hub
```

Once you start the TGI server, instantiate `InferenceClient()` with the URL to the endpoint serving the model. You can then call `text_generation()` to hit the endpoint through Python.

```python
from huggingface_hub import InferenceClient

client = InferenceClient(model=URL_TO_ENDPOINT_SERVING_TGI)
client.text_generation(prompt="Write a code for snake game")
```

To stream tokens in `InferenceClient`, simply pass `stream=True`. Another parameter you can use with the TGI backend is `details`. You can get more details on generation (tokens, probabilities, etc.) by setting `details` to `True`. By default, `details` is set to `False`, and `text_generation` returns a string. If you pass `details=True` and `stream=True`, `text_generation` will return a `TextGenerationStreamResponse` which consists of the generated token, generated text, and details.

```python
output = client.text_generation(prompt="Meaning of life is", details=True)
print(output)

# TextGenerationResponse(generated_text=' a complex concept that is not always clear to the individual. It is a concept that is not always', details=Details(finish_reason=<FinishReason.Length: 'length'>, generated_tokens=20, seed=None, prefill=[], tokens=[Token(id=267, text=' a', logprob=-2.0723474, special=False), Token(id=11235, text=' complex', logprob=-3.1272552, special=False), Token(id=17908, text=' concept', logprob=-1.3632495, special=False),..))
```

You can see how to stream below.

```python
output = client.text_generation(prompt="Meaning of life is", model="http://localhost:3000/", stream=True, details=True)
print(next(iter(output)))

# TextGenerationStreamResponse(token=Token(id=267, text=' a', logprob=-2.0723474, special=False), generated_text=None, details=None)
```

You can check out the details of the function [here](https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation).

## ChatUI

ChatUI is an open-source interface built for LLM serving. It offers many customization options, such as web search with SERP API and more.
ChatUI can automatically consume the TGI server and even provides an option to switch between different TGI endpoints. You can try it out at [Hugging Chat](https://huggingface.co/chat/), or use the [ChatUI Docker Space](https://huggingface.co/new-space?template=huggingchat/chat-ui-template) to deploy your own Hugging Chat to Spaces.

To serve both ChatUI and TGI in the same environment, simply add your own endpoints to the `MODELS` variable in the `.env.local` file inside the `chat-ui` repository. Provide the endpoints pointing to where TGI is served.

```
{
// rest of the model config here
"endpoints": [{"url": "https://HOST:PORT/generate_stream"}]
}
```

![ChatUI](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chatui_screen.png)

## API documentation

You can consult the OpenAPI documentation of the `text-generation-inference` REST API using the `/docs` route. The Swagger UI is also available [here](https://huggingface.github.io/text-generation-inference).
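As a final sketch tying the pieces together, token streaming with `InferenceClient` can be accumulated into a full string; the endpoint URL below is a placeholder for your own TGI instance:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(model="http://localhost:3000/")  # placeholder TGI endpoint

# With stream=True and details=False, text_generation yields plain token strings.
result = ""
for token_text in client.text_generation(prompt="Meaning of life is", max_new_tokens=20, stream=True):
    result += token_text
print(result)
```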
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/basic_tutorials/gated_model_access.md
# Serving Private & Gated Models If the model you wish to serve is behind gated access or the model repository on Hugging Face Hub is private, and you have access to the model, you can provide your Hugging Face Hub access token. You can generate and copy a read token from [Hugging Face Hub tokens page](https://huggingface.co/settings/tokens) If you're using the CLI, set the `HUGGING_FACE_HUB_TOKEN` environment variable. For example: ``` export HUGGING_FACE_HUB_TOKEN=<YOUR READ TOKEN> ``` If you would like to do it through Docker, you can provide your token by specifying `HUGGING_FACE_HUB_TOKEN` as shown below. ```bash model=meta-llama/Llama-2-7b-chat-hf volume=$PWD/data token=<your READ token> docker run --gpus all \ --shm-size 1g \ -e HUGGING_FACE_HUB_TOKEN=$token \ -p 8080:80 \ -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.0.0 \ --model-id $model ```
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/basic_tutorials/preparing_model.md
# Preparing the Model

Text Generation Inference improves how the model is served in several aspects.

## Quantization

TGI supports [bits-and-bytes](https://github.com/TimDettmers/bitsandbytes#bitsandbytes) and [GPT-Q](https://arxiv.org/abs/2210.17323) quantization. To speed up inference with quantization, simply set the `quantize` flag to `bitsandbytes` or `gptq` depending on the quantization technique you wish to use. When using GPT-Q quantization, you need to point to one of the models [here](https://huggingface.co/models?search=gptq).

## RoPE Scaling

RoPE scaling can be used to increase the sequence length of the model at inference time without necessarily fine-tuning it. To enable RoPE scaling, simply pass the `--rope-scaling`, `--max-input-length` and `--rope-factor` flags when running through the CLI. `--rope-scaling` can take the values `linear` or `dynamic`. If your model is not fine-tuned to a longer sequence length, use `dynamic`. `--rope-factor` is the ratio between the intended max sequence length and the model's original max sequence length. Make sure to pass `--max-input-length` to provide the maximum input length for extension.

<Tip>

We recommend using `dynamic` RoPE scaling.

</Tip>

## Safetensors

[Safetensors](https://github.com/huggingface/safetensors) is a fast and safe persistence format for deep learning models, and is required for tensor parallelism. TGI supports `safetensors` model loading under the hood. By default, given a repository with `safetensors` and `pytorch` weights, TGI will always load `safetensors`. If there are no `safetensors` weights, TGI will convert the `pytorch` weights to `safetensors` format.
0
hf_public_repos/text-generation-inference/docs/source
hf_public_repos/text-generation-inference/docs/source/basic_tutorials/using_cli.md
# Using TGI CLI

You can use the TGI command-line interface (CLI) to download weights, serve and quantize models, or get information on serving parameters. To install the CLI, please refer to [the installation section](./installation#install-cli).

`text-generation-server` lets you download the model with the `download-weights` command like below 👇

```bash
text-generation-server download-weights MODEL_HUB_ID
```

You can also use it to quantize models like below 👇

```bash
text-generation-server quantize MODEL_HUB_ID OUTPUT_DIR
```

You can use `text-generation-launcher` to serve models.

```bash
text-generation-launcher --model-id MODEL_HUB_ID --port 8080
```

There are many options and parameters you can pass to `text-generation-launcher`. The documentation for the CLI is kept minimal; it relies on self-generated documentation, which can be viewed by running

```bash
text-generation-launcher --help
```

You can also find it hosted in this [Swagger UI](https://huggingface.github.io/text-generation-inference/).

The same documentation is available for `text-generation-server`.

```bash
text-generation-server --help
```
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/integration-tests/conftest.py
import sys
import subprocess
import contextlib
import pytest
import asyncio
import os
import docker
import json
import math
import random

from docker.errors import NotFound
from typing import Optional, List, Dict

from syrupy.extensions.json import JSONSnapshotExtension
from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError

from text_generation import AsyncClient
from text_generation.types import Response, Details, InputToken, Token, BestOfSequence

DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None)
HUGGING_FACE_HUB_TOKEN = os.getenv("HUGGING_FACE_HUB_TOKEN", None)
DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data")


class ResponseComparator(JSONSnapshotExtension):
    def serialize(
        self,
        data,
        *,
        exclude=None,
        matcher=None,
    ):
        if isinstance(data, List):
            data = [d.dict() for d in data]

        data = self._filter(
            data=data, depth=0, path=(), exclude=exclude, matcher=matcher
        )
        return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + "\n"

    def matches(
        self,
        *,
        serialized_data,
        snapshot_data,
    ) -> bool:
        def convert_data(data):
            data = json.loads(data)

            if isinstance(data, Dict):
                return Response(**data)
            if isinstance(data, List):
                return [Response(**d) for d in data]
            raise NotImplementedError

        def eq_token(token: Token, other: Token) -> bool:
            return (
                token.id == other.id
                and token.text == other.text
                and math.isclose(token.logprob, other.logprob, rel_tol=0.2)
                and token.special == other.special
            )

        def eq_prefill_token(prefill_token: InputToken, other: InputToken) -> bool:
            try:
                return (
                    prefill_token.id == other.id
                    and prefill_token.text == other.text
                    and (
                        math.isclose(prefill_token.logprob, other.logprob, rel_tol=0.2)
                        if prefill_token.logprob is not None
                        else prefill_token.logprob == other.logprob
                    )
                )
            except TypeError:
                return False

        def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
            )

        def eq_details(details: Details, other: Details) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
                and (
                    len(details.best_of_sequences)
                    if details.best_of_sequences is not None
                    else 0
                )
                == (
                    len(other.best_of_sequences)
                    if other.best_of_sequences is not None
                    else 0
                )
                and (
                    all(
                        [
                            eq_best_of(d, o)
                            for d, o in zip(
                                details.best_of_sequences, other.best_of_sequences
                            )
                        ]
                    )
                    if details.best_of_sequences is not None
                    else details.best_of_sequences == other.best_of_sequences
                )
            )

        def eq_response(response: Response, other: Response) -> bool:
            return response.generated_text == other.generated_text and eq_details(
                response.details, other.details
            )

        serialized_data = convert_data(serialized_data)
        snapshot_data = convert_data(snapshot_data)

        if not isinstance(serialized_data, List):
            serialized_data = [serialized_data]
        if not isinstance(snapshot_data, List):
            snapshot_data = [snapshot_data]

        return len(snapshot_data) == len(serialized_data) and all(
            [eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)]
        )


class LauncherHandle:
    def __init__(self, port: int):
        self.client = AsyncClient(f"http://localhost:{port}")

    def _inner_health(self):
        raise NotImplementedError

    async def health(self, timeout: int = 60):
        assert timeout > 0
        for _ in range(timeout):
            if not self._inner_health():
                raise RuntimeError("Launcher crashed")

            try:
                await self.client.generate("test")
                return
            except (ClientConnectorError, ClientOSError, ServerDisconnectedError):
                # Sleep without blocking the event loop while the server warms up
                # (a plain time.sleep(1) here would stall every other coroutine).
                await asyncio.sleep(1)
        raise RuntimeError("Health check failed")


class ContainerLauncherHandle(LauncherHandle):
    def __init__(self, docker_client, container_name, port: int):
        super(ContainerLauncherHandle, self).__init__(port)
        self.docker_client = docker_client
        self.container_name = container_name

    def _inner_health(self) -> bool:
        container = self.docker_client.containers.get(self.container_name)
        return container.status in ["running", "created"]


class ProcessLauncherHandle(LauncherHandle):
    def __init__(self, process, port: int):
        super(ProcessLauncherHandle, self).__init__(port)
        self.process = process

    def _inner_health(self) -> bool:
        return self.process.poll() is None


@pytest.fixture
def response_snapshot(snapshot):
    return snapshot.use_extension(ResponseComparator)


@pytest.fixture(scope="module")
def event_loop():
    loop = asyncio.get_event_loop()
    yield loop
    loop.close()


@pytest.fixture(scope="module")
def launcher(event_loop):
    @contextlib.contextmanager
    def local_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
    ):
        port = random.randint(8000, 10_000)
        master_port = random.randint(10_000, 20_000)

        shard_uds_path = (
            f"/tmp/tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}-server"
        )

        args = [
            "text-generation-launcher",
            "--model-id",
            model_id,
            "--port",
            str(port),
            "--master-port",
            str(master_port),
            "--shard-uds-path",
            shard_uds_path,
        ]

        env = os.environ

        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if trust_remote_code:
            args.append("--trust-remote-code")

        env["LOG_LEVEL"] = "info,text_generation_router=debug"

        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"

        with subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
        ) as process:
            yield ProcessLauncherHandle(process, port)

            process.terminate()
            process.wait(60)

            launcher_output = process.stdout.read().decode("utf-8")
            print(launcher_output, file=sys.stderr)

            process.stdout.close()
            process.stderr.close()

        if not use_flash_attention:
            del env["USE_FLASH_ATTENTION"]

    @contextlib.contextmanager
    def docker_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
    ):
        port = random.randint(8000, 10_000)

        args = ["--model-id", model_id, "--env"]

        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if trust_remote_code:
            args.append("--trust-remote-code")

        client = docker.from_env()

        container_name = f"tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}"

        try:
            container = client.containers.get(container_name)
            container.stop()
            container.wait()
        except NotFound:
            pass

        gpu_count = num_shard if num_shard is not None else 1

        env = {"LOG_LEVEL": "info,text_generation_router=debug"}
        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"

        if HUGGING_FACE_HUB_TOKEN is not None:
            env["HUGGING_FACE_HUB_TOKEN"] = HUGGING_FACE_HUB_TOKEN

        volumes = []
        if DOCKER_VOLUME:
            volumes = [f"{DOCKER_VOLUME}:/data"]

        container = client.containers.run(
            DOCKER_IMAGE,
            command=args,
            name=container_name,
            environment=env,
            auto_remove=False,
            detach=True,
            device_requests=[
                docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])
            ],
            volumes=volumes,
            ports={"80/tcp": port},
        )

        yield ContainerLauncherHandle(client, container.name, port)

        if not use_flash_attention:
            del env["USE_FLASH_ATTENTION"]

        try:
            container.stop()
            container.wait()
        except NotFound:
            pass

        container_output = container.logs().decode("utf-8")
        print(container_output, file=sys.stderr)

        container.remove()

    if DOCKER_IMAGE is not None:
        return docker_launcher
    return local_launcher


@pytest.fixture(scope="module")
def generate_load():
    async def generate_load_inner(
        client: AsyncClient, prompt: str, max_new_tokens: int, n: int
    ) -> List[Response]:
        futures = [
            client.generate(
                prompt, max_new_tokens=max_new_tokens, decoder_input_details=True
            )
            for _ in range(n)
        ]

        return await asyncio.gather(*futures)

    return generate_load_inner
0
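A minimal standalone sketch (not a file from the repository) of the tolerance rule ResponseComparator applies when matching snapshots: token logprobs are compared with math.isclose at rel_tol=0.2, so fresh runs that drift by up to roughly 20% still match the committed snapshot. The numeric values below are illustrative.

import math

snapshot_logprob = -1.5732422  # a logprob as stored in a committed snapshot
observed_logprob = -1.7        # a slightly different value from a fresh run

# Mirrors eq_token / eq_prefill_token in conftest.py above.
assert math.isclose(observed_logprob, snapshot_logprob, rel_tol=0.2)

# Larger drift fails the comparison, and with it the snapshot assertion.
assert not math.isclose(-2.5, snapshot_logprob, rel_tol=0.2)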
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/integration-tests/pytest.ini
[pytest]
addopts = --snapshot-warn-unused
asyncio_mode = auto
markers =
    private: marks tests as requiring an admin hf token (deselect with '-m "not private"')
0
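A hedged usage note for the marker declared above: private tests can be deselected exactly as the ini hints. The snippet below is an illustrative runner, not a file from the repository; pytest.main accepts the same arguments as the command line.

import sys

import pytest

if __name__ == "__main__":
    # Equivalent to: pytest -m "not private" integration-tests
    sys.exit(pytest.main(["-m", "not private", "integration-tests"]))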
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/integration-tests/requirements.txt
syrupy
text-generation
pytest
pytest-asyncio==0.17.2
docker
0
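The conftest above keys its behavior off three environment variables. A hypothetical illustration follows (the image tag and token are placeholders, not values from the repository):

import os

# When DOCKER_IMAGE is set, the launcher fixture returns docker_launcher;
# otherwise it runs the locally installed text-generation-launcher binary.
os.environ["DOCKER_IMAGE"] = "ghcr.io/huggingface/text-generation-inference:latest"  # placeholder tag

# Forwarded into the container so gated/private models can be downloaded.
os.environ["HUGGING_FACE_HUB_TOKEN"] = "hf_xxx"  # placeholder token

# Host directory mounted at /data inside the container (defaults to /data).
os.environ["DOCKER_VOLUME"] = "/data"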
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_bloom_560m.py
import pytest


@pytest.fixture(scope="module")
def bloom_560_handle(launcher):
    with launcher("bigscience/bloom-560m") as handle:
        yield handle


@pytest.fixture(scope="module")
async def bloom_560(bloom_560_handle):
    await bloom_560_handle.health(240)
    return bloom_560_handle.client


@pytest.mark.asyncio
async def test_bloom_560m(bloom_560, response_snapshot):
    response = await bloom_560.generate(
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        top_p=0.9,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_bloom_560m_all_params(bloom_560, response_snapshot):
    response = await bloom_560.generate(
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_bloom_560m_load(bloom_560, generate_load, response_snapshot):
    responses = await generate_load(
        bloom_560,
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
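For reference, a hedged standalone sketch of the same client calls the test above drives through the launcher fixture. It assumes a text-generation-inference server is already listening on localhost:8080 (the port is an assumption, not taken from the repository):

import asyncio

from text_generation import AsyncClient


async def main():
    client = AsyncClient("http://localhost:8080")
    response = await client.generate(
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        top_p=0.9,
        decoder_input_details=True,
        seed=0,
    )
    # generated_text and details mirror the fields the tests assert on.
    print(response.generated_text)
    print(response.details.generated_tokens)


if __name__ == "__main__":
    asyncio.run(main())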
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py
import pytest


@pytest.fixture(scope="module")
def bloom_560m_sharded_handle(launcher):
    with launcher("bigscience/bloom-560m", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def bloom_560m_sharded(bloom_560m_sharded_handle):
    await bloom_560m_sharded_handle.health(240)
    return bloom_560m_sharded_handle.client


@pytest.mark.asyncio
async def test_bloom_560m_sharded(bloom_560m_sharded, response_snapshot):
    response = await bloom_560m_sharded.generate(
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        top_p=0.9,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_bloom_560m_sharded_load(
    bloom_560m_sharded, generate_load, response_snapshot
):
    responses = await generate_load(
        bloom_560m_sharded,
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_falcon.py
import pytest


@pytest.fixture(scope="module")
def flash_falcon_handle(launcher):
    with launcher("tiiuae/falcon-7b", trust_remote_code=True) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_falcon(flash_falcon_handle):
    await flash_falcon_handle.health(300)
    return flash_falcon_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_falcon(flash_falcon, response_snapshot):
    response = await flash_falcon.generate(
        "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_falcon_all_params(flash_falcon, response_snapshot):
    response = await flash_falcon.generate(
        "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_falcon_load(flash_falcon, generate_load, response_snapshot):
    responses = await generate_load(
        flash_falcon,
        "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_llama.py
import pytest


@pytest.fixture(scope="module")
def flash_llama_handle(launcher):
    with launcher("huggingface/llama-7b", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_llama(flash_llama_handle):
    await flash_llama_handle.health(300)
    return flash_llama_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama(flash_llama, response_snapshot):
    response = await flash_llama.generate(
        "Test request", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_all_params(flash_llama, response_snapshot):
    response = await flash_llama.generate(
        "Test request",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 5
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_load(flash_llama, generate_load, response_snapshot):
    responses = await generate_load(flash_llama, "Test request", max_new_tokens=10, n=4)

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_llama_gptq.py
import pytest


@pytest.fixture(scope="module")
def flash_llama_gptq_handle(launcher):
    with launcher("huggingface/llama-7b-gptq", num_shard=2, quantize="gptq") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_llama_gptq(flash_llama_gptq_handle):
    await flash_llama_gptq_handle.health(300)
    return flash_llama_gptq_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_gptq(flash_llama_gptq, response_snapshot):
    response = await flash_llama_gptq.generate(
        "Test request", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_gptq_all_params(flash_llama_gptq, response_snapshot):
    response = await flash_llama_gptq.generate(
        "Test request",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_gptq_load(
    flash_llama_gptq, generate_load, response_snapshot
):
    responses = await generate_load(
        flash_llama_gptq, "Test request", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_neox.py
import pytest


@pytest.fixture(scope="module")
def flash_neox_handle(launcher):
    with launcher("stabilityai/stablelm-tuned-alpha-3b", num_shard=1) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_neox(flash_neox_handle):
    await flash_neox_handle.health(300)
    return flash_neox_handle.client


@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_neox(flash_neox, response_snapshot):
    response = await flash_neox.generate(
        "<|USER|>What's your mood today?<|ASSISTANT|>",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_neox_load(flash_neox, generate_load, response_snapshot):
    responses = await generate_load(
        flash_neox,
        "<|USER|>What's your mood today?<|ASSISTANT|>",
        max_new_tokens=10,
        n=4,
    )

    generated_texts = [r.generated_text for r in responses]

    assert len(generated_texts) == 4
    assert all(
        [text == generated_texts[0] for text in generated_texts]
    ), generated_texts

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_neox_sharded.py
import pytest


@pytest.fixture(scope="module")
def flash_neox_sharded_handle(launcher):
    with launcher("OpenAssistant/oasst-sft-1-pythia-12b", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_neox_sharded(flash_neox_sharded_handle):
    await flash_neox_sharded_handle.health(300)
    return flash_neox_sharded_handle.client


@pytest.mark.asyncio
async def test_flash_neox(flash_neox_sharded, response_snapshot):
    response = await flash_neox_sharded.generate(
        "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_flash_neox_load(flash_neox_sharded, generate_load, response_snapshot):
    responses = await generate_load(
        flash_neox_sharded,
        "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_santacoder.py
import pytest


@pytest.fixture(scope="module")
def flash_santacoder_handle(launcher):
    with launcher("bigcode/santacoder") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_santacoder(flash_santacoder_handle):
    await flash_santacoder_handle.health(300)
    return flash_santacoder_handle.client


@pytest.mark.asyncio
async def test_flash_santacoder(flash_santacoder, response_snapshot):
    response = await flash_santacoder.generate(
        "def print_hello", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_flash_santacoder_load(
    flash_santacoder, generate_load, response_snapshot
):
    responses = await generate_load(
        flash_santacoder, "def print_hello", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_starcoder.py
import pytest


@pytest.fixture(scope="module")
def flash_starcoder_handle(launcher):
    with launcher("bigcode/starcoder", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_starcoder(flash_starcoder_handle):
    await flash_starcoder_handle.health(300)
    return flash_starcoder_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder(flash_starcoder, response_snapshot):
    response = await flash_starcoder.generate(
        "def print_hello", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_default_params(flash_starcoder, response_snapshot):
    response = await flash_starcoder.generate(
        "def print_hello",
        max_new_tokens=60,
        temperature=0.2,
        top_p=0.95,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 60
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_load(flash_starcoder, generate_load, response_snapshot):
    responses = await generate_load(
        flash_starcoder, "def print_hello", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_starcoder_gptq.py
import pytest


@pytest.fixture(scope="module")
def flash_starcoder_gptq_handle(launcher):
    with launcher("Narsil/starcoder-gptq", num_shard=2, quantize="gptq") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_starcoder_gptq(flash_starcoder_gptq_handle):
    await flash_starcoder_gptq_handle.health(300)
    return flash_starcoder_gptq_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_gptq(flash_starcoder_gptq, response_snapshot):
    response = await flash_starcoder_gptq.generate(
        "def geometric_mean(L: List[float]):",
        max_new_tokens=20,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 20
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_gptq_default_params(
    flash_starcoder_gptq, response_snapshot
):
    response = await flash_starcoder_gptq.generate(
        "def geometric_mean(L: List[float]):",
        max_new_tokens=20,
        temperature=0.2,
        top_p=0.95,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 20
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_gptq_load(
    flash_starcoder_gptq, generate_load, response_snapshot
):
    responses = await generate_load(
        flash_starcoder_gptq,
        "def geometric_mean(L: List[float]):",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_mpt.py
import pytest


@pytest.fixture(scope="module")
def mpt_sharded_handle(launcher):
    with launcher("mosaicml/mpt-7b", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def mpt_sharded(mpt_sharded_handle):
    await mpt_sharded_handle.health(300)
    return mpt_sharded_handle.client


@pytest.mark.asyncio
async def test_mpt(mpt_sharded, response_snapshot):
    response = await mpt_sharded.generate(
        "What is Deep Learning?",
        max_new_tokens=17,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 17
    assert (
        response.generated_text
        == " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
    )
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_mpt_load(mpt_sharded, generate_load, response_snapshot):
    responses = await generate_load(
        mpt_sharded,
        "What is Deep Learning?",
        max_new_tokens=17,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])
    assert (
        responses[0].generated_text
        == " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
    )

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_mt0_base.py
import pytest


@pytest.fixture(scope="module")
def mt0_base_handle(launcher):
    with launcher("bigscience/mt0-base") as handle:
        yield handle


@pytest.fixture(scope="module")
async def mt0_base(mt0_base_handle):
    await mt0_base_handle.health(300)
    return mt0_base_handle.client


@pytest.mark.asyncio
async def test_mt0_base(mt0_base, response_snapshot):
    response = await mt0_base.generate(
        "Why is the sky blue?",
        max_new_tokens=10,
        top_p=0.9,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 5
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_mt0_base_all_params(mt0_base, response_snapshot):
    response = await mt0_base.generate(
        "Why is the sky blue?",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 9
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_mt0_base_load(mt0_base, generate_load, response_snapshot):
    responses = await generate_load(
        mt0_base,
        "Why is the sky blue?",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_neox.py
import pytest


@pytest.fixture(scope="module")
def neox_handle(launcher):
    with launcher(
        "stabilityai/stablelm-tuned-alpha-3b", num_shard=1, use_flash_attention=False
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def neox(neox_handle):
    await neox_handle.health(300)
    return neox_handle.client


@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox(neox, response_snapshot):
    response = await neox.generate(
        "<|USER|>What's your mood today?<|ASSISTANT|>",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox_load(neox, generate_load, response_snapshot):
    responses = await generate_load(
        neox,
        "<|USER|>What's your mood today?<|ASSISTANT|>",
        max_new_tokens=10,
        n=4,
    )

    generated_texts = [r.generated_text for r in responses]

    assert len(generated_texts) == 4
    # The original had the assertion and its message swapped
    # (`assert generated_texts, all(...)`), which always passes for a
    # non-empty list; assert the equality itself, as test_flash_neox does.
    assert all(
        [text == generated_texts[0] for text in generated_texts]
    ), generated_texts

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_neox_sharded.py
import pytest


@pytest.fixture(scope="module")
def neox_sharded_handle(launcher):
    with launcher(
        "OpenAssistant/oasst-sft-1-pythia-12b", num_shard=2, use_flash_attention=False
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def neox_sharded(neox_sharded_handle):
    await neox_sharded_handle.health(300)
    return neox_sharded_handle.client


@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox(neox_sharded, response_snapshot):
    response = await neox_sharded.generate(
        "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox_load(neox_sharded, generate_load, response_snapshot):
    responses = await generate_load(
        neox_sharded,
        "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_t5_sharded.py
import pytest


@pytest.fixture(scope="module")
def t5_sharded_handle(launcher):
    with launcher("google/flan-t5-xxl", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def t5_sharded(t5_sharded_handle):
    await t5_sharded_handle.health(300)
    return t5_sharded_handle.client


@pytest.mark.asyncio
async def test_t5_sharded(t5_sharded, response_snapshot):
    response = await t5_sharded.generate(
        "Please answer the following question. What is the boiling point of Nitrogen?",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response == response_snapshot


@pytest.mark.asyncio
async def test_t5_sharded_load(t5_sharded, generate_load, response_snapshot):
    responses = await generate_load(
        t5_sharded,
        "Please answer the following question. What is the boiling point of Nitrogen?",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5625, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4609375, "text": " ort" }, { "id": 35567, "logprob": -7.5585938, "text": "olan" }, { "id": 15, "logprob": -1.4003906, "text": "," }, { "id": 1669, "logprob": -1.5673828, "text": " il" }, { "id": 11580, "logprob": -0.94628906, "text": " faut" }, { "id": 3913, "logprob": -3.703125, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": 0, "tokens": [ { "id": 578, "logprob": -1.6591797, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.4492188, "special": false, "text": " faire" }, { "id": 159570, "logprob": -6.6835938, "special": false, "text": " réch" }, { "id": 810, "logprob": 0.0, "special": false, "text": "au" }, { "id": 12736, "logprob": 0.0, "special": false, "text": "ffer" }, { "id": 1742, "logprob": -2.5175781, "special": false, "text": " au" }, { "id": 6105, "logprob": -2.0078125, "special": false, "text": " bain" }, { "id": 88254, "logprob": -0.12695312, "special": false, "text": "-mar" }, { "id": 641, "logprob": 0.0, "special": false, "text": "ie" }, { "id": 2940, "logprob": -3.5175781, "special": false, "text": " avec" } ] }, "generated_text": " le faire réchauffer au bain-marie avec" }
0
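A small worked example (not part of the repository) for reading the snapshot above: each "logprob" is a natural-log probability, so math.exp recovers the probability the model assigned to that token at that step.

import math

# " le" was generated with logprob -1.6591797 in test_bloom_560m.json:
print(math.exp(-1.6591797))  # ~0.19, i.e. roughly a 19% chance

# A logprob of 0.0 (e.g. "au" and "ffer" above) means probability 1.0 after
# the sampling constraints applied at that step.
print(math.exp(0.0))  # 1.0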
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 15, "logprob": null, "text": "," }, { "id": 1669, "logprob": -5.4414062, "text": " il" }, { "id": 11580, "logprob": -2.3378906, "text": " faut" }, { "id": 3913, "logprob": -4.3554688, "text": " tout" }, { "id": 39261, "logprob": -2.9238281, "text": " d'abord" } ], "seed": 0, "tokens": [ { "id": 408, "logprob": -0.07891846, "special": false, "text": " que" }, { "id": 366, "logprob": -1.2939453, "special": false, "text": " la" }, { "id": 8769, "logprob": -0.3708496, "special": false, "text": " personne" }, { "id": 1479, "logprob": -2.2871094, "special": false, "text": " qui" }, { "id": 2997, "logprob": -0.8671875, "special": false, "text": " vous" }, { "id": 35977, "logprob": -1.5097656, "special": false, "text": " suit" }, { "id": 21558, "logprob": -0.07891846, "special": false, "text": " ait" }, { "id": 447, "logprob": -0.12695312, "special": false, "text": " un" }, { "id": 78606, "logprob": -2.21875, "special": false, "text": " profil" }, { "id": 3899, "logprob": -1.3535156, "special": false, "text": " bien" } ] }, "generated_text": "Pour déguster un ortolan, il faut tout d'abord que la personne qui vous suit ait un profil bien" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5625, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4609375, "text": " ort" }, { "id": 35567, "logprob": -7.5585938, "text": "olan" }, { "id": 15, "logprob": -1.4003906, "text": "," }, { "id": 1669, "logprob": -1.5673828, "text": " il" }, { "id": 11580, "logprob": -0.94628906, "text": " faut" }, { "id": 3913, "logprob": -3.703125, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7646484, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.6113281, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5263672, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.2119141, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.40844727, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.0037841797, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0195312, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.53125, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4140625, "text": " ort" }, { "id": 35567, "logprob": -7.5234375, "text": "olan" }, { "id": 15, "logprob": -1.3613281, "text": "," }, { "id": 1669, "logprob": -1.5458984, "text": " il" }, { "id": 11580, "logprob": -0.94189453, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.53125, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4140625, "text": " ort" }, { "id": 35567, "logprob": -7.5234375, "text": "olan" }, { 
"id": 15, "logprob": -1.3613281, "text": "," }, { "id": 1669, "logprob": -1.5458984, "text": " il" }, { "id": 11580, "logprob": -0.94189453, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.53125, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4140625, "text": " ort" }, { "id": 35567, "logprob": -7.5234375, "text": "olan" }, { "id": 15, "logprob": -1.3613281, "text": "," }, { "id": 1669, "logprob": -1.5458984, "text": " il" }, { "id": 11580, "logprob": -0.94189453, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5390625, "text": " dég" }, { "id": 21543, "logprob": -0.14758301, "text": "uster" }, { "id": 447, "logprob": -1.9296875, "text": " un" }, { "id": 46341, "logprob": -15.4453125, "text": " ort" }, { "id": 35567, "logprob": -7.59375, "text": "olan" }, { "id": 15, "logprob": -1.3994141, "text": "," }, { "id": 1669, "logprob": -1.578125, "text": " il" }, { "id": 11580, "logprob": -0.9453125, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": 0, "tokens": [ { "id": 578, "logprob": -1.6474609, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5097656, "special": false, "text": " faire" }, { "id": 159570, "logprob": -6.65625, "special": false, "text": " réch" }, { "id": 810, "logprob": 0.0, "special": false, "text": "au" }, { "id": 12736, "logprob": 0.0, "special": false, "text": "ffer" }, { "id": 1742, "logprob": -2.5859375, "special": false, "text": " au" }, { "id": 6105, "logprob": -2.03125, "special": false, "text": " bain" }, { "id": 88254, "logprob": -0.12695312, "special": false, "text": "-mar" }, { "id": 641, "logprob": 0.0, "special": false, "text": "ie" }, { "id": 2940, "logprob": -3.5175781, "special": false, "text": " avec" } ] }, "generated_text": " le faire réchauffer au bain-marie avec" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5390625, "text": " dég" }, { "id": 21543, "logprob": -0.14758301, "text": "uster" }, { "id": 447, "logprob": -1.9296875, "text": " un" }, { "id": 46341, "logprob": -15.4453125, "text": " ort" }, { "id": 35567, "logprob": -7.59375, "text": "olan" }, { "id": 15, "logprob": -1.3994141, "text": "," }, { "id": 1669, "logprob": -1.578125, "text": " il" }, { "id": 11580, "logprob": -0.9453125, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7529297, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.6054688, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5283203, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4716797, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11853027, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.41210938, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.0037765503, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0166016, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, "logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, 
"logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, "logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6132812, "text": "af" }, { "id": 249, "logprob": -6.5039062, "text": "at" }, { "id": 1480, "logprob": -8.078125, "text": "ron" }, { "id": 304, "logprob": -2.3261719, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07556152, "text": "aff" }, { "id": 255, "logprob": -0.0067749023, "text": "es" }, { "id": 23, "logprob": -1.546875, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.734375, "text": " most" }, { "id": 21735, "logprob": -5.109375, "text": " glorious" }, { "id": 5985, "logprob": -2.09375, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3828125, "text": " face" }, { "id": 275, "logprob": -0.004432678, "text": " of" }, { "id": 414, "logprob": -1.9677734, "text": " this" }, { "id": 6490, "logprob": -2.046875, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." }, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2753906, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20874023, "text": "ron" }, { "id": 9369, "logprob": -4.5507812, "text": " believes" }, { "id": 455, "logprob": -4.5664062, "text": " all" }, { "id": 599, "logprob": -2.7402344, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7675781, "text": " are" }, { "id": 23981, "logprob": -5.0, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.103637695, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6835938, "text": " glorious" }, { "id": 64398, "logprob": -1.8173828, "text": " majesty" }, { "id": 275, "logprob": -0.23510742, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24633789, "text": " gir" }, { "id": 23226, "logprob": -0.02960205, "text": "affe" }, { "id": 25, "logprob": -0.17333984, "text": "." }, { "id": 193, "logprob": -1.3935547, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99365234, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10809326, "text": "af" }, { "id": 249, "logprob": -0.042663574, "text": "at" }, { "id": 1480, "logprob": -0.0024776459, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" 
}, { "id": 193, "logprob": -1.1015625, "text": "\n" }, { "id": 50, "logprob": -0.05709839, "text": "G" }, { "id": 330, "logprob": -0.13208008, "text": "ir" }, { "id": 1622, "logprob": -0.0071487427, "text": "af" }, { "id": 249, "logprob": -0.008468628, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074691772, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.3173828, "special": false, "text": "," }, { "id": 8156, "logprob": -0.23803711, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.56933594, "special": false, "text": "!" }, { "id": 193, "logprob": -0.61279297, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.41967773, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.0023403168, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007904053, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 330, "logprob": null, "text": "ir" }, { "id": 1622, "logprob": -7.8125, "text": "af" }, { "id": 249, "logprob": -4.5, "text": "at" }, { "id": 1480, "logprob": -10.875, "text": "ron" }, { "id": 37, "logprob": -3.6875, "text": ":" } ], "seed": 0, "tokens": [ { "id": 836, "logprob": -1.265625, "special": false, "text": " i" }, { "id": 18, "logprob": -0.119628906, "special": false, "text": "'" }, { "id": 298, "logprob": -2.265625, "special": false, "text": "ve" }, { "id": 650, "logprob": -0.49804688, "special": false, "text": " been" }, { "id": 1241, "logprob": 0.0, "special": false, "text": " using" }, { "id": 334, "logprob": 0.0, "special": false, "text": " it" }, { "id": 312, "logprob": -1.2421875, "special": false, "text": " for" }, { "id": 909, "logprob": -0.99609375, "special": false, "text": " years" }, { "id": 193, "logprob": -0.30273438, "special": false, "text": "\n" }, { "id": 807, "logprob": -1.078125, "special": false, "text": "ik" } ] }, "generated_text": "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron: i've been using it for years\nik" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5039062, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.04837036, "text": " with" }, { "id": 26680, "logprob": -3.9960938, "text": " gir" }, { "id": 1903, "logprob": -0.07525635, "text": "aff" }, { "id": 255, "logprob": -0.006790161, "text": "es" }, { "id": 23, "logprob": -1.546875, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.109375, "text": " glorious" }, { "id": 5985, "logprob": -2.09375, "text": " animal" }, { "id": 313, "logprob": -1.1845703, "text": " on" }, { "id": 248, "logprob": -0.77734375, "text": " the" }, { "id": 1936, "logprob": -2.3828125, "text": " face" }, { "id": 275, "logprob": -0.0044403076, "text": " of" }, { "id": 414, "logprob": -1.9667969, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." }, { "id": 401, "logprob": -7.921875, "text": " G" }, { "id": 6013, "logprob": -2.2714844, "text": "ira" }, { "id": 694, "logprob": -0.62353516, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5507812, "text": " believes" }, { "id": 455, "logprob": -4.5625, "text": " all" }, { "id": 599, "logprob": -2.7402344, "text": " other" }, { "id": 5632, "logprob": -0.21899414, "text": " animals" }, { "id": 362, "logprob": -0.76708984, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.103515625, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6796875, "text": " glorious" }, { "id": 64398, "logprob": -1.8222656, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.3544922, "text": " the" }, { "id": 26680, "logprob": -0.24609375, "text": " gir" }, { "id": 23226, "logprob": -0.02960205, "text": "affe" }, { "id": 25, "logprob": -0.17358398, "text": "." }, { "id": 193, "logprob": -1.3925781, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.5898438, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99365234, "text": "," }, { "id": 29033, "logprob": -2.2304688, "text": " Gir" }, { "id": 1622, "logprob": -0.107788086, "text": "af" }, { "id": 249, "logprob": -0.04257202, "text": "at" }, { "id": 1480, "logprob": -0.0024871826, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" 
}, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056915283, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.0071105957, "text": "af" }, { "id": 249, "logprob": -0.008453369, "text": "at" }, { "id": 1480, "logprob": -0.0006928444, "text": "ron" }, { "id": 37, "logprob": -0.0074920654, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.828125, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.3178711, "special": false, "text": "," }, { "id": 8156, "logprob": -0.23925781, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.61279297, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.4177246, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.0023345947, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5283203, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007965088, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." 
}, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" }, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" 
}, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." }, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" 
}, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." 
}, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" }, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5380859, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5917969, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2773438, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.034362793, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96533203, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36669922, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013122559, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.1503906, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43652344, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9404297, "special": false, "text": "1" } ] }, "generated_text": "for /api/v1/projects/1" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "stop_sequence", "generated_tokens": 5, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": 0, "tokens": [ { "id": 5229, "logprob": -2.5683594, "special": false, "text": " failed" }, { "id": 29901, "logprob": -0.45336914, "special": false, "text": ":" }, { "id": 4829, "logprob": -1.8408203, "special": false, "text": " Error" }, { "id": 297, "logprob": -1.0556641, "special": false, "text": " in" }, { "id": 1243, "logprob": 0.0, "special": false, "text": " test" } ] }, "generated_text": "Test requestfailed: Error in test" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5380859, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5859375, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2695312, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.03439331, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36694336, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013114929, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.1542969, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43847656, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9433594, "special": false, "text": "1" } ] }, "generated_text": "for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5322266, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5585938, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.265625, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.034088135, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36816406, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013191223, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43774414, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9443359, "special": false, "text": "1" } ] }, "generated_text": "for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5322266, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5585938, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.265625, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.034088135, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36816406, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013191223, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43774414, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9443359, "special": false, "text": "1" } ] }, "generated_text": "for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5322266, "special": false, "text": " for" }, { 
"id": 847, "logprob": -2.5585938, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.265625, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.034088135, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36816406, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013191223, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43774414, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9443359, "special": false, "text": "1" } ] }, "generated_text": "for /api/v1/projects/1" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.59375, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3867188, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8183594, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6367188, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0527344, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.6542969, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.056121826, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.01600647, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.87939453, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7529297, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.2980957, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": 0, "tokens": [ { "id": 29899, "logprob": -1.1640625, "special": false, "text": "-" }, { "id": 1454, "logprob": -0.07543945, "special": false, "text": "for" }, { "id": 29899, "logprob": 0.0, "special": false, "text": "-" }, { "id": 9342, "logprob": 0.0, "special": false, "text": "comment" }, { "id": 29901, "logprob": 0.0, "special": false, "text": ":" }, { "id": 396, "logprob": -0.2956543, "special": false, "text": " #" }, { "id": 29906, "logprob": -0.52734375, "special": false, "text": "2" }, { "id": 29900, "logprob": -0.6899414, "special": false, "text": "0" }, { "id": 29896, "logprob": 0.0, "special": false, "text": "1" }, { "id": 29946, "logprob": -1.5068359, "special": false, "text": "4" } ] }, "generated_text": "Test request-for-comment: #2014" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.671875, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3828125, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8105469, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6396484, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0546875, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.6513672, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.056365967, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.016082764, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.87841797, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7548828, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.29711914, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3828125, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.828125, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6386719, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0527344, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.6542969, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.055877686, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.016021729, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.8769531, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7583008, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.29833984, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.671875, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3847656, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8144531, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6396484, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0527344, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.65478516, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.056243896, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.016143799, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.8808594, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.75341797, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.2956543, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": null, "tokens": [ { 
"id": 29918, "logprob": -2.3769531, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8183594, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6396484, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0546875, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.65478516, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.05557251, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.01612854, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.8730469, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7519531, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.29785156, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.1054688, "text": " your" }, { "id": 12315, "logprob": -9.953125, "text": " mood" }, { "id": 3063, "logprob": -4.0820312, "text": " today" }, { "id": 32, "logprob": -0.15148926, "text": "?" }, { "id": 50279, "logprob": -0.27026367, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.88378906, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.94921875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2402344, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3725586, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.078125, "special": false, "text": "," }, { "id": 534, "logprob": -0.67822266, "special": false, "text": " which" }, { "id": 310, "logprob": -1.3837891, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7050781, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.052001953, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.21875, "text": " your" }, { "id": 12315, "logprob": -9.9375, "text": " mood" }, { "id": 3063, "logprob": -4.1015625, "text": " today" }, { "id": 32, "logprob": -0.15319824, "text": "?" }, { "id": 50279, "logprob": -0.2614746, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8886719, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.98046875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2265625, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3479004, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.0117188, "special": false, "text": "," }, { "id": 534, "logprob": -0.67871094, "special": false, "text": " which" }, { "id": 310, "logprob": -1.421875, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7382812, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.051330566, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.1054688, "text": " your" }, { "id": 12315, "logprob": -9.953125, "text": " mood" }, { "id": 3063, "logprob": -4.0820312, "text": " today" }, { "id": 32, "logprob": -0.15148926, "text": "?" }, { "id": 50279, "logprob": -0.27026367, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.88378906, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9819336, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2421875, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3474121, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.078125, "special": false, "text": "," }, { "id": 534, "logprob": -0.69140625, "special": false, "text": " which" }, { "id": 310, "logprob": -1.4072266, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7041016, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.053375244, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0351562, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.21875, "text": " your" }, { "id": 12315, "logprob": -9.9375, "text": " mood" }, { "id": 3063, "logprob": -4.1015625, "text": " today" }, { "id": 32, "logprob": -0.15319824, "text": "?" 
}, { "id": 50279, "logprob": -0.2614746, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8886719, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.98046875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2265625, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3479004, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.0117188, "special": false, "text": "," }, { "id": 534, "logprob": -0.67871094, "special": false, "text": " which" }, { "id": 310, "logprob": -1.421875, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7382812, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.051330566, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.21875, "text": " your" }, { "id": 12315, "logprob": -9.9375, "text": " mood" }, { "id": 3063, "logprob": -4.1015625, "text": " today" }, { "id": 32, "logprob": -0.15319824, "text": "?" }, { "id": 50279, "logprob": -0.2614746, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8886719, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.98046875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2265625, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3479004, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.0117188, "special": false, "text": "," }, { "id": 534, "logprob": -0.67871094, "special": false, "text": " which" }, { "id": 310, "logprob": -1.421875, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7382812, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.051330566, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox_sharded/test_flash_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" }, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5390625, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002090454, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.3589859e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0009455681, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.088012695, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12585449, "special": false, "text": " first" }, { "id": 908, "logprob": -0.017196655, "special": false, "text": " used" }, { "id": 275, "logprob": -0.49731445, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox_sharded/test_flash_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" }, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" 
}, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" 
}, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" }, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21447754, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.43701172, "special": false, "text": " print" }, { "id": 372, "logprob": -0.5361328, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2412109, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.7583008, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.20837402, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2470703, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": 
null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2590332, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39379883, "special": false, "text": " print" }, { "id": 440, "logprob": -0.61376953, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.47338867, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.80810547, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7397461, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0371094, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_default_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 60, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6328125, "text": " print" }, { "id": 81, "logprob": -1.6035156, "text": "_" }, { "id": 7656, "logprob": -5.9882812, "text": "hello" } ], "seed": 0, "tokens": [ { "id": 2262, "logprob": -0.042999268, "special": false, "text": "():" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": 0.0, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -0.38549805, "special": false, "text": " World" }, { "id": 657, "logprob": -0.5229492, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.10632324, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": -0.20141602, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 7656, "logprob": 0.0, "special": false, "text": "hello" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 711, "logprob": 0.0, "special": false, "text": "):" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": -0.16027832, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 313, "logprob": 0.0, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 636, "logprob": 0.0, "special": false, "text": " name" }, { "id": 27, "logprob": 0.0, "special": false, "text": ")" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 7656, "logprob": 0.0, "special": false, "text": "hello" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 381, "logprob": 0.0, "special": false, "text": "age" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 11442, "logprob": 0.0, "special": false, "text": " age" }, { "id": 711, "logprob": 0.0, "special": false, "text": "):" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": 0.0, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 313, "logprob": 0.0, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 636, "logprob": 0.0, "special": false, "text": " name" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 313, 
"logprob": -0.6328125, "special": false, "text": " \"" }, { "id": 313, "logprob": -1.7011719, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 596, "logprob": 0.0, "special": false, "text": " str" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 381, "logprob": 0.0, "special": false, "text": "age" }, { "id": 490, "logprob": 0.0, "special": false, "text": "))" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef print_hello_name(name):\n print(\"Hello \" + name)\n\ndef print_hello_name_age(name, age):\n print(\"Hello \" + name + \" \" + str(age))\n\ndef print" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 
1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json
{ "generated_text": "\n return sum(L) / len(L)\n\n\ndef geometric_mean(L", "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 20, "seed": null, "prefill": [ { "id": 589, "text": "def", "logprob": null }, { "id": 3226, "text": " ge", "logprob": -9.0234375 }, { "id": 21017, "text": "ometric", "logprob": -9.0859375 }, { "id": 81, "text": "_", "logprob": -0.25878906 }, { "id": 6009, "text": "mean", "logprob": -2.2109375 }, { "id": 26, "text": "(", "logprob": -0.30371094 }, { "id": 62, "text": "L", "logprob": -5.6054688 }, { "id": 44, "text": ":", "logprob": -3.0722656 }, { "id": 1682, "text": " List", "logprob": -0.6879883 }, { "id": 77, "text": "[", "logprob": -0.38500977 }, { "id": 1808, "text": "float", "logprob": -0.984375 }, { "id": 10794, "text": "]):", "logprob": -2.5351562 } ], "tokens": [ { "id": 284, "text": "\n ", "logprob": -1.1738281, "special": false }, { "id": 442, "text": " return", "logprob": -0.95947266, "special": false }, { "id": 3632, "text": " sum", "logprob": -1.4199219, "special": false }, { "id": 26, "text": "(", "logprob": -0.085876465, "special": false }, { "id": 62, "text": "L", "logprob": -0.09875488, "special": false }, { "id": 27, "text": ")", "logprob": -0.30517578, "special": false }, { "id": 517, "text": " /", "logprob": -0.42089844, "special": false }, { "id": 2069, "text": " len", "logprob": -0.042053223, "special": false }, { "id": 26, "text": "(", "logprob": -0.0011806488, "special": false }, { "id": 62, "text": "L", "logprob": -0.0005259514, "special": false }, { "id": 27, "text": ")", "logprob": -0.0017633438, "special": false }, { "id": 478, "text": "\n\n", "logprob": -0.69189453, "special": false }, { "id": 203, "text": "\n", "logprob": -0.041870117, "special": false }, { "id": 589, "text": "def", "logprob": -0.27856445, "special": false }, { "id": 3226, "text": " ge", "logprob": -1.7255859, "special": false }, { "id": 21017, "text": "ometric", "logprob": -0.011291504, "special": false }, { "id": 81, "text": "_", "logprob": -0.008430481, "special": false }, { "id": 6009, "text": "mean", "logprob": -0.025787354, "special": false }, { "id": 26, "text": "(", "logprob": -0.073913574, "special": false }, { "id": 62, "text": "L", "logprob": -0.09967041, "special": false } ] } }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 20, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.09375, "text": "ometric" }, { "id": 81, "logprob": -0.25976562, "text": "_" }, { "id": 6009, "logprob": -2.2148438, "text": "mean" }, { "id": 26, "logprob": -0.3010254, "text": "(" }, { "id": 62, "logprob": -5.6757812, "text": "L" }, { "id": 44, "logprob": -3.0898438, "text": ":" }, { "id": 1682, "logprob": -0.6791992, "text": " List" }, { "id": 77, "logprob": -0.38891602, "text": "[" }, { "id": 1808, "logprob": -0.92041016, "text": "float" }, { "id": 10794, "logprob": -2.5390625, "text": "]):" } ], "seed": 0, "tokens": [ { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 442, "logprob": 0.0, "special": false, "text": " return" }, { "id": 11665, "logprob": -1.6005859, "special": false, "text": " reduce" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 5962, "logprob": 0.0, "special": false, "text": "lambda" }, { "id": 816, "logprob": 0.0, "special": false, "text": " x" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 533, "logprob": 0.0, "special": false, "text": " y" }, { "id": 44, "logprob": 0.0, "special": false, "text": ":" }, { "id": 816, "logprob": 0.0, "special": false, "text": " x" }, { "id": 319, "logprob": 0.0, "special": false, "text": " *" }, { "id": 533, "logprob": 0.0, "special": false, "text": " y" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 498, "logprob": 0.0, "special": false, "text": " L" }, { "id": 27, "logprob": 0.0, "special": false, "text": ")" }, { "id": 203, "logprob": -0.11968994, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 3226, "logprob": 0.0, "special": false, "text": " ge" }, { "id": 21017, "logprob": 0.0, "special": false, "text": "ometric" } ] }, "generated_text": "\n return reduce(lambda x, y: x * y, L)\n\ndef geometric" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25927734, "text": "_" }, { "id": 6009, "logprob": -2.25, "text": "mean" }, { "id": 26, "logprob": -0.30126953, "text": "(" }, { "id": 62, "logprob": -5.7539062, "text": "L" }, { "id": 44, "logprob": -3.0878906, "text": ":" }, { "id": 1682, "logprob": -0.6845703, "text": " List" }, { "id": 77, "logprob": -0.3918457, "text": "[" }, { "id": 1808, "logprob": -0.8798828, "text": "float" }, { "id": 10794, "logprob": -2.4980469, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1533203, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.91796875, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.3291016, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.08062744, "special": false, "text": "(" }, { "id": 62, "logprob": -0.097717285, "special": false, "text": "L" }, { "id": 27, "logprob": -0.29003906, "special": false, "text": ")" }, { "id": 517, "logprob": -0.34958984, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.03829956, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011987686, "special": false, "text": "(" }, { "id": 62, "logprob": -0.00050878525, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25878906, "text": "_" }, { "id": 6009, "logprob": -2.2109375, "text": "mean" }, { "id": 26, "logprob": -0.30371094, "text": "(" }, { "id": 62, "logprob": -5.6054688, "text": "L" }, { "id": 44, "logprob": -3.0722656, "text": ":" }, { "id": 1682, "logprob": -0.6879883, "text": " List" }, { "id": 77, "logprob": -0.38500977, "text": "[" }, { "id": 1808, "logprob": -0.984375, "text": "float" }, { "id": 10794, "logprob": -2.5351562, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1738281, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.9584961, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.4169922, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.085876465, "special": false, "text": "(" }, { "id": 62, "logprob": -0.0982666, "special": false, "text": "L" }, { "id": 27, "logprob": -0.3022461, "special": false, "text": ")" }, { "id": 517, "logprob": -0.40504883, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.041656494, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011844635, "special": false, "text": "(" }, { "id": 62, "logprob": -0.0005264282, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25927734, "text": "_" }, { "id": 6009, "logprob": -2.25, "text": "mean" }, { "id": 26, "logprob": -0.30126953, "text": "(" }, { "id": 62, "logprob": -5.7539062, "text": "L" }, { "id": 44, 
"logprob": -3.0878906, "text": ":" }, { "id": 1682, "logprob": -0.6845703, "text": " List" }, { "id": 77, "logprob": -0.3918457, "text": "[" }, { "id": 1808, "logprob": -0.8798828, "text": "float" }, { "id": 10794, "logprob": -2.4980469, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1533203, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.9165039, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.328125, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.07946777, "special": false, "text": "(" }, { "id": 62, "logprob": -0.09820557, "special": false, "text": "L" }, { "id": 27, "logprob": -0.28930664, "special": false, "text": ")" }, { "id": 517, "logprob": -0.34592773, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.038330078, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011940002, "special": false, "text": "(" }, { "id": 62, "logprob": -0.00050878525, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25927734, "text": "_" }, { "id": 6009, "logprob": -2.25, "text": "mean" }, { "id": 26, "logprob": -0.30126953, "text": "(" }, { "id": 62, "logprob": -5.7539062, "text": "L" }, { "id": 44, "logprob": -3.0878906, "text": ":" }, { "id": 1682, "logprob": -0.6845703, "text": " List" }, { "id": 77, "logprob": -0.3918457, "text": "[" }, { "id": 1808, "logprob": -0.8798828, "text": "float" }, { "id": 10794, "logprob": -2.4980469, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1533203, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.91259766, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.3251953, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.08062744, "special": false, "text": "(" }, { "id": 62, "logprob": -0.09906006, "special": false, "text": "L" }, { "id": 27, "logprob": -0.28979492, "special": false, "text": ")" }, { "id": 517, "logprob": -0.35958984, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.038604736, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011901855, "special": false, "text": "(" }, { "id": 62, "logprob": -0.0005078316, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5117188, "text": " is" }, { "id": 18147, "logprob": -8.96875, "text": " Deep" }, { "id": 20727, "logprob": -1.953125, "text": " Learning" }, { "id": 32, "logprob": -0.94189453, "text": "?" } ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5830078, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3105469, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.3215332, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5566406, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.6074219, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.6923828, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5263672, "special": false, "text": " a" }, { "id": 749, "logprob": -1.8544922, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6118164, "special": false, "text": "field" }, { "id": 273, "logprob": -0.055877686, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0537109, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.0115737915, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9111328, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4589844, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.4853516, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021636963, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5117188, "text": " is" }, { "id": 18147, "logprob": -8.96875, "text": " Deep" }, { "id": 20727, "logprob": -1.953125, "text": " Learning" }, { "id": 32, "logprob": -0.94189453, "text": "?" } ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5830078, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3183594, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.32617188, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5742188, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.6015625, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.67822266, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5395508, "special": false, "text": " a" }, { "id": 749, "logprob": -1.8623047, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6020508, "special": false, "text": "field" }, { "id": 273, "logprob": -0.0552063, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0742188, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011405945, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9165039, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4501953, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.4960938, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.02116394, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5, "text": " is" }, { "id": 18147, "logprob": -8.984375, "text": " Deep" }, { "id": 20727, "logprob": -1.96875, "text": " Learning" }, { "id": 32, "logprob": -0.93359375, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5800781, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3242188, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.31835938, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5644531, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.5957031, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.68603516, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5258789, "special": false, "text": " a" }, { "id": 749, "logprob": -1.859375, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6166992, "special": false, "text": "field" }, { "id": 273, "logprob": -0.056762695, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0703125, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011428833, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9213867, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4726562, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.5039062, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021652222, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5, "text": " is" }, { "id": 18147, "logprob": -8.984375, "text": " Deep" }, { "id": 20727, "logprob": -1.96875, "text": " Learning" }, { "id": 32, "logprob": -0.93359375, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5800781, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3242188, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.31835938, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5644531, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.5957031, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.68603516, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5258789, "special": false, "text": " a" }, { "id": 749, "logprob": -1.859375, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6166992, "special": false, "text": "field" }, { "id": 273, "logprob": -0.056762695, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0703125, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011428833, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9213867, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4726562, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.5039062, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021652222, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5, "text": " is" }, { "id": 18147, "logprob": -8.984375, "text": " Deep" }, { "id": 20727, "logprob": -1.96875, "text": " Learning" }, { "id": 32, "logprob": -0.93359375, "text": "?" } ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5800781, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3242188, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.31835938, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5644531, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.5957031, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.68603516, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5258789, "special": false, "text": " a" }, { "id": 749, "logprob": -1.859375, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6166992, "special": false, "text": "field" }, { "id": 273, "logprob": -0.056762695, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0703125, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011428833, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9213867, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4726562, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.5039062, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021652222, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 5, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": 0, "tokens": [ { "id": 926, "logprob": -4.3554688, "special": false, "text": " To" }, { "id": 18295, "logprob": -7.7734375, "special": false, "text": " sell" }, { "id": 7868, "logprob": -3.9257812, "special": false, "text": " things" }, { "id": 260, "logprob": -2.4179688, "special": false, "text": "." }, { "id": 1, "logprob": 0.0, "special": true, "text": "</s>" } ] }, "generated_text": "To sell things." }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 9, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": 0, "tokens": [ { "id": 16017, "logprob": -0.30908203, "special": false, "text": " blue" }, { "id": 20495, "logprob": 0.0, "special": false, "text": " sky" }, { "id": 259, "logprob": -0.28271484, "special": false, "text": " " }, { "id": 15484, "logprob": -1.7929688, "special": false, "text": "appear" }, { "id": 345, "logprob": -0.8935547, "special": false, "text": "ed" }, { "id": 281, "logprob": 0.0, "special": false, "text": " in" }, { "id": 287, "logprob": 0.0, "special": false, "text": " the" }, { "id": 20495, "logprob": -0.32299805, "special": false, "text": " sky" }, { "id": 1, "logprob": 0.0, "special": true, "text": "</s>" } ] }, "generated_text": "Why is the sky blue?blue sky appeared in the sky" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3798828, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36328125, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0947266, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8286133, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6826172, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.7290039, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1992188, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8984375, "text": " mood" }, { "id": 3063, "logprob": -4.0976562, "text": " today" }, { "id": 32, "logprob": -0.14562988, "text": "?" }, { "id": 50279, "logprob": -0.26733398, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.86279297, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.94921875, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1835938, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.074035645, "special": false, "text": "," }, { "id": 1394, "logprob": -0.86376953, "special": false, "text": "You" }, { "id": 452, "logprob": -1.2070312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4365234, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.109375, "special": false, "text": " choice" }, { "id": 273, "logprob": -0.93408203, "special": false, "text": " of" }, { "id": 752, "logprob": -1.8808594, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" }, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" }, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" 
}, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" }, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.4179688, "text": " is" }, { "id": 247, "logprob": -2.1542969, "text": " a" }, { "id": 1167, "logprob": -5.359375, "text": " mem" }, { "id": 70, "logprob": -0.006038666, "text": "e" }, { "id": 13, "logprob": -7.328125, "text": "," }, { "id": 285, "logprob": -0.3173828, "text": " and" }, { "id": 752, "logprob": -2.0625, "text": " what" }, { "id": 434, "logprob": -5.7734375, "text": "'s" }, { "id": 253, "logprob": -0.74072266, "text": " the" }, { "id": 2892, "logprob": -6.5898438, "text": " history" }, { "id": 3212, "logprob": -2.2949219, "text": " behind" }, { "id": 436, "logprob": -11.40625, "text": " this" }, { "id": 3159, "logprob": -2.1113281, "text": " word" }, { "id": 32, "logprob": -0.008056641, "text": "?" }, { "id": 0, "logprob": -2.3300781, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.28125, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.5878906, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5449219, "special": false, "text": " word" }, { "id": 346, "logprob": -0.05038452, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002292633, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.3828278e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0010242462, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.090270996, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12719727, "special": false, "text": " first" }, { "id": 908, "logprob": -0.016571045, "special": false, "text": " used" }, { "id": 275, "logprob": -0.43432617, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.4179688, "text": " is" }, { "id": 247, "logprob": -2.1542969, "text": " a" }, { "id": 1167, "logprob": -5.359375, "text": " mem" }, { "id": 70, "logprob": -0.006038666, "text": "e" }, { "id": 13, "logprob": -7.328125, "text": "," }, { "id": 285, "logprob": -0.3173828, "text": " and" }, { "id": 752, "logprob": -2.0625, "text": " what" }, { "id": 434, "logprob": -5.7734375, "text": "'s" }, { "id": 253, "logprob": -0.74072266, "text": " the" }, { "id": 2892, "logprob": -6.5898438, "text": " history" }, { "id": 3212, "logprob": -2.2949219, "text": " behind" }, { "id": 436, "logprob": -11.40625, "text": " this" }, { "id": 3159, "logprob": -2.1113281, "text": " word" }, { "id": 32, "logprob": -0.008056641, "text": "?" }, { "id": 0, "logprob": -2.3300781, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.28125, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.5878906, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5498047, "special": false, "text": " word" }, { "id": 346, "logprob": -0.04815674, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002313614, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.2636185e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0010147095, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0859375, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12609863, "special": false, "text": " first" }, { "id": 908, "logprob": -0.016601562, "special": false, "text": " used" }, { "id": 275, "logprob": -0.38256836, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1640625, "text": " a" }, { "id": 1167, "logprob": -5.40625, "text": " mem" }, { "id": 70, "logprob": -0.005420685, "text": "e" }, { "id": 13, "logprob": -7.2226562, "text": "," }, { "id": 285, "logprob": -0.26879883, "text": " and" }, { "id": 752, "logprob": -2.1992188, "text": " what" }, { "id": 434, "logprob": -5.46875, "text": "'s" }, { "id": 253, "logprob": -0.8017578, "text": " the" }, { "id": 2892, "logprob": -6.6796875, "text": " history" }, { "id": 3212, "logprob": -2.1972656, "text": " behind" }, { "id": 436, "logprob": -11.4453125, "text": " this" }, { "id": 3159, "logprob": -2.1933594, "text": " word" }, { "id": 32, "logprob": -0.007858276, "text": "?" 
}, { "id": 0, "logprob": -2.328125, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.21875, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.6201172, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.546875, "special": false, "text": " word" }, { "id": 346, "logprob": -0.051879883, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.0020179749, "special": false, "text": "mem" }, { "id": 70, "logprob": -9.059906e-06, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00096797943, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.07940674, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12182617, "special": false, "text": " first" }, { "id": 908, "logprob": -0.017227173, "special": false, "text": " used" }, { "id": 275, "logprob": -0.44482422, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1640625, "text": " a" }, { "id": 1167, "logprob": -5.40625, "text": " mem" }, { "id": 70, "logprob": -0.005420685, "text": "e" }, { "id": 13, "logprob": -7.2226562, "text": "," }, { "id": 285, "logprob": -0.26879883, "text": " and" }, { "id": 752, "logprob": -2.1992188, "text": " what" }, { "id": 434, "logprob": -5.46875, "text": "'s" }, { "id": 253, "logprob": -0.8017578, "text": " the" }, { "id": 2892, "logprob": -6.6796875, "text": " history" }, { "id": 3212, "logprob": -2.1972656, "text": " behind" }, { "id": 436, "logprob": -11.4453125, "text": " this" }, { "id": 3159, "logprob": -2.1933594, "text": " word" }, { "id": 32, "logprob": -0.007858276, "text": "?" 
}, { "id": 0, "logprob": -2.328125, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.21875, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.6201172, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.546875, "special": false, "text": " word" }, { "id": 346, "logprob": -0.051879883, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.0020179749, "special": false, "text": "mem" }, { "id": 70, "logprob": -9.059906e-06, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00096797943, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.07940674, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12182617, "special": false, "text": " first" }, { "id": 908, "logprob": -0.017227173, "special": false, "text": " used" }, { "id": 275, "logprob": -0.44482422, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1640625, "text": " a" }, { "id": 1167, "logprob": -5.40625, "text": " mem" }, { "id": 70, "logprob": -0.005420685, "text": "e" }, { "id": 13, "logprob": -7.2226562, "text": "," }, { "id": 285, "logprob": -0.26879883, "text": " and" }, { "id": 752, "logprob": -2.1992188, "text": " what" }, { "id": 434, "logprob": -5.46875, "text": "'s" }, { "id": 253, "logprob": -0.8017578, "text": " the" }, { "id": 2892, "logprob": -6.6796875, "text": " history" }, { "id": 3212, "logprob": -2.1972656, "text": " behind" }, { "id": 436, "logprob": -11.4453125, "text": " this" }, { "id": 3159, "logprob": -2.1933594, "text": " word" }, { "id": 32, "logprob": -0.007858276, "text": "?" }, { "id": 0, "logprob": -2.328125, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.21875, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.6201172, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.546875, "special": false, "text": " word" }, { "id": 346, "logprob": -0.051879883, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.0020179749, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.04904175e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0009560585, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.08557129, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12084961, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.4025879, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2099609, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2451172, "special": false, "text": " " }, { "id": 1956, "logprob": -0.3322754, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19213867, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030151367, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2119141, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2480469, "special": false, "text": " " }, { "id": 1956, "logprob": -0.33203125, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19250488, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030166626, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2119141, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2480469, "special": false, "text": " " }, { "id": 1956, "logprob": -0.33203125, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19250488, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030166626, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2119141, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2480469, "special": false, "text": " " }, { "id": 1956, "logprob": -0.33203125, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19250488, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030166626, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2099609, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2451172, "special": false, "text": " " }, { "id": 1956, "logprob": -0.3322754, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19213867, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030151367, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" } ]
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/launcher/Cargo.toml
[package]
name = "text-generation-launcher"
description = "Text Generation Launcher"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true

[dependencies]
clap = { version = "4.1.4", features = ["derive", "env"] }
ctrlc = { version = "3.2.5", features = ["termination"] }
nix = "0.26.2"
serde = { version = "1.0.152", features = ["derive"] }
serde_json = "1.0.93"
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.16", features = ["json", "env-filter"] }

[dev-dependencies]
float_eq = "1.0.1"
reqwest = { version = "0.11.14", features = ["blocking", "json"] }

[build-dependencies]
vergen = { version = "8.0.0", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] }
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/launcher/build.rs
use std::error::Error;
use vergen::EmitBuilder;

fn main() -> Result<(), Box<dyn Error>> {
    // Emit cargo and rustc compile time values
    EmitBuilder::builder().all_cargo().all_rustc().emit()?;

    // Try to get the git sha from the local git repository
    if EmitBuilder::builder()
        .fail_on_error()
        .git_sha(false)
        .emit()
        .is_err()
    {
        // Unable to get the git sha
        if let Ok(sha) = std::env::var("GIT_SHA") {
            // Set it from an env var
            println!("cargo:rustc-env=VERGEN_GIT_SHA={sha}");
        }
    }

    // Set docker label if present
    if let Ok(label) = std::env::var("DOCKER_LABEL") {
        // Set it from an env var
        println!("cargo:rustc-env=DOCKER_LABEL={label}");
    }

    Ok(())
}
0