diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..09faee817d7c7ce2e4f08834c1cc1a3d6c7131ad 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,35 +1,37 @@ -*.7z filter=lfs diff=lfs merge=lfs -text -*.arrow filter=lfs diff=lfs merge=lfs -text -*.bin filter=lfs diff=lfs merge=lfs -text -*.bz2 filter=lfs diff=lfs merge=lfs -text -*.ckpt filter=lfs diff=lfs merge=lfs -text -*.ftz filter=lfs diff=lfs merge=lfs -text -*.gz filter=lfs diff=lfs merge=lfs -text -*.h5 filter=lfs diff=lfs merge=lfs -text -*.joblib filter=lfs diff=lfs merge=lfs -text -*.lfs.* filter=lfs diff=lfs merge=lfs -text -*.mlmodel filter=lfs diff=lfs merge=lfs -text -*.model filter=lfs diff=lfs merge=lfs -text -*.msgpack filter=lfs diff=lfs merge=lfs -text -*.npy filter=lfs diff=lfs merge=lfs -text -*.npz filter=lfs diff=lfs merge=lfs -text -*.onnx filter=lfs diff=lfs merge=lfs -text -*.ot filter=lfs diff=lfs merge=lfs -text -*.parquet filter=lfs diff=lfs merge=lfs -text -*.pb filter=lfs diff=lfs merge=lfs -text -*.pickle filter=lfs diff=lfs merge=lfs -text -*.pkl filter=lfs diff=lfs merge=lfs -text -*.pt filter=lfs diff=lfs merge=lfs -text -*.pth filter=lfs diff=lfs merge=lfs -text -*.rar filter=lfs diff=lfs merge=lfs -text -*.safetensors filter=lfs diff=lfs merge=lfs -text -saved_model/**/* filter=lfs diff=lfs merge=lfs -text -*.tar.* filter=lfs diff=lfs merge=lfs -text -*.tar filter=lfs diff=lfs merge=lfs -text -*.tflite filter=lfs diff=lfs merge=lfs -text -*.tgz filter=lfs diff=lfs merge=lfs -text -*.wasm filter=lfs diff=lfs merge=lfs -text -*.xz filter=lfs diff=lfs merge=lfs -text -*.zip filter=lfs diff=lfs merge=lfs -text -*.zst filter=lfs diff=lfs merge=lfs -text -*tfevents* filter=lfs diff=lfs merge=lfs -text +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +arxivdb/chroma.sqlite3 filter=lfs diff=lfs merge=lfs -text +models/models--jinaai--jina-embeddings-v2-base-en/blobs/6b70f1386f05b9703ea4edf7f1550a8925399f9580e4cc754cc099efc1e736d8 filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore 
b/.gitignore index 423fc105fb741e0549cd37d4aa93fcad9ad1606b..726642c6ff2628b46a897e7ccf5a1912e80f65f4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,5 @@ -arxivdb/ -models/ -__pycache__/ -*.pyc -apikey.txt -db.sqlite3 +models/ +__pycache__/ +*.pyc +apikey.txt hotfix.ipynb \ No newline at end of file diff --git a/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/data_level0.bin b/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/data_level0.bin new file mode 100644 index 0000000000000000000000000000000000000000..69a84488e00f1ac0433dbfb804e100e1379e6278 --- /dev/null +++ b/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/data_level0.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d96c82cf4183e567eddf45be92064c7d818268621da9821caa2367bb20cba18 +size 32120000 diff --git a/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/header.bin b/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/header.bin new file mode 100644 index 0000000000000000000000000000000000000000..5da71c652499f08b634c260a762e43bfd4e03a7d --- /dev/null +++ b/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/header.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a494575edafaafb2b60f5a2ad563719976abf7ae3a35ca7c9b5aaae36842006c +size 100 diff --git a/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/index_metadata.pickle b/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/index_metadata.pickle new file mode 100644 index 0000000000000000000000000000000000000000..562e7d3d18299efd38f83d69caeefcea0e6ab67c --- /dev/null +++ b/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/index_metadata.pickle @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91e4880dca7113b4c3a3644e63aa5809f4a30474d1332f66d5f0ad082fe41833 +size 357939 diff --git a/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/length.bin b/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/length.bin new file mode 100644 index 0000000000000000000000000000000000000000..a1ba8dc11b8f22cd87e64aa9262e32e330b92e8b --- /dev/null +++ b/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/length.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:814d4b3244fb0f86f8d5beac519239863d973c20c8fec45624d0c0ae54baf9cf +size 40000 diff --git a/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/link_lists.bin b/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/link_lists.bin new file mode 100644 index 0000000000000000000000000000000000000000..1ffbfce2bc5c9b3895ea7cb773cc6ca0cb5b6681 --- /dev/null +++ b/arxivdb/4b0c8007-4402-4129-b225-8bd2a39f0757/link_lists.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d679f3012c3a4ae23e21dbfce89bb153cab85edef4c19f5340a4464e99f4c014 +size 87396 diff --git a/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/data_level0.bin b/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/data_level0.bin new file mode 100644 index 0000000000000000000000000000000000000000..cde82c254f79f1d964b654b1b1305f2427fae2ee --- /dev/null +++ b/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/data_level0.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31ea31ff76723407f460b7534220ef974bfb3a563732c1a85e01fd9b2610dc13 +size 6424000 diff --git a/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/header.bin b/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/header.bin new file mode 100644 index 0000000000000000000000000000000000000000..b3cc34f03cc332f6531643c281a8a5239e0c9397 --- /dev/null +++ b/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/header.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:db5064bd751b93036fa600922f99c2534c183c3335c5267c8c5413a73f450320 +size 100 diff --git a/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/index_metadata.pickle b/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/index_metadata.pickle new file mode 100644 index 0000000000000000000000000000000000000000..17c7a438a5cef626c2e22b08611313c8af1c8e24 --- /dev/null +++ b/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/index_metadata.pickle @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd938c16ea62b22a52094297d5d570442daba226ad67e941b0254655e843c67a +size 65937 diff --git a/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/length.bin b/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/length.bin new file mode 100644 index 0000000000000000000000000000000000000000..82baedfc42cab904b74748a8001fa30348a5f950 --- /dev/null +++ b/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/length.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d9d14a589aeeaf2e86552f9c3f1bb4f556e49244f186540c71bac6c1680e834 +size 8000 diff --git a/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/link_lists.bin b/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/link_lists.bin new file mode 100644 index 0000000000000000000000000000000000000000..0e3aa7ea71fd4ad65c4c1f31e145f3573e539e90 --- /dev/null +++ b/arxivdb/6a7f20ca-1ffd-40b4-9707-7d9628097d5a/link_lists.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3751bd54da338722a3b5370921bf446e34169a639a18beb7145e5d4e9e3778e3 +size 18268 diff --git a/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/data_level0.bin b/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/data_level0.bin new file mode 100644 index 0000000000000000000000000000000000000000..6c7e20a04debd65bf293fedaa402738ff14ffdaf --- /dev/null +++ b/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/data_level0.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86e41597eb04379b7582da7eeb5fb0aaca29eb32749069e69886358370fab575 +size 3212000 diff --git a/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/header.bin b/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/header.bin new file mode 100644 index 0000000000000000000000000000000000000000..ab9650a6eae2ab620bfd68fdbcaed1bc3749dc8d --- /dev/null +++ b/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/header.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdb00e89b6ee7733fd37556b1da3447d9895ad7431512096c0e073ed667a25d0 +size 100 diff --git a/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/index_metadata.pickle b/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/index_metadata.pickle new file mode 100644 index 0000000000000000000000000000000000000000..06512c6925c28536fb33c8a4218760b31fb46a56 --- /dev/null +++ b/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/index_metadata.pickle @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd5b11142a96276baf9591e9524a8d9241eb013902301021dddea3a81b61d63a +size 33934 diff --git a/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/length.bin b/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/length.bin new file mode 100644 index 0000000000000000000000000000000000000000..a927c169d38b616e09f5112b158897bc3b9ec7e1 --- /dev/null +++ b/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/length.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e680a6fe8d1f2bf76260963cf27e0c7bd58c39e9c82262906a285eaf89b1c27d +size 4000 diff --git a/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/link_lists.bin b/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/link_lists.bin new file 
mode 100644 index 0000000000000000000000000000000000000000..04794088aafc4386e14d20c91c44a4d5cea0f1e4 --- /dev/null +++ b/arxivdb/7e557f5a-be88-4080-aa85-e3bcd927fcf9/link_lists.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:666164435753a1784160baf485cc1c80e665103e6bd19a1998430f93246f1c29 +size 8624 diff --git a/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/data_level0.bin b/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/data_level0.bin new file mode 100644 index 0000000000000000000000000000000000000000..6bd379359f33dc9d09379fee3125c1a644051275 --- /dev/null +++ b/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/data_level0.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecac6a0b1c9974d085507909895bec9040788bd20bf184eae140000cef97551d +size 38544000 diff --git a/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/header.bin b/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/header.bin new file mode 100644 index 0000000000000000000000000000000000000000..e6adeedd4e0a7364ec3fa2108f6b4fa4a26b5aac --- /dev/null +++ b/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/header.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:269f137da42a494d996ad44046f5e349b59d2d31eca4b39aa82d7ec76f62cdf9 +size 100 diff --git a/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/index_metadata.pickle b/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/index_metadata.pickle new file mode 100644 index 0000000000000000000000000000000000000000..3511cd544f36b091fa7cfa5cd182dbe6c4508903 --- /dev/null +++ b/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/index_metadata.pickle @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08461957df6b58835618a34c77a9c96b6dc54f21e04c60c9d10dd36d5b864414 +size 429953 diff --git a/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/length.bin b/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/length.bin new file mode 100644 index 0000000000000000000000000000000000000000..2e4009145b05aca69dc61e74c8a75abf81159cc1 --- /dev/null +++ b/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/length.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e648660e0a36f652356dd7e0210b243cba14b3b7c267c3c05fdc7614b1d2dd03 +size 48000 diff --git a/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/link_lists.bin b/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/link_lists.bin new file mode 100644 index 0000000000000000000000000000000000000000..422184c7d7100df2ff5c85ef3eefb31006422024 --- /dev/null +++ b/arxivdb/951c5fab-677a-4406-b99b-b4006c3423a2/link_lists.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13b650da98a6bd2ec371437494a2cb09a2fae5b67d6eead12af43b40fb548e7c +size 104644 diff --git a/arxivdb/chroma.sqlite3 b/arxivdb/chroma.sqlite3 new file mode 100644 index 0000000000000000000000000000000000000000..b00f6212ea800f33fc1c2b61959ccc158f4e039a --- /dev/null +++ b/arxivdb/chroma.sqlite3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c48f817474996b45a3f4da1e127a2fde083db4bfeddb71893d598b8200fb056 +size 123736064 diff --git a/arxivdb/chromadb.sqlite3 b/arxivdb/chromadb.sqlite3 new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/chat/__init__.py b/chat/__init__.py index ab9853350c73d44ce64d398732321e29e37d10ad..676b93643920277ce245b03d81cd4ee111c87afc 100644 --- a/chat/__init__.py +++ b/chat/__init__.py @@ -3,7 +3,7 @@ import chat.arxiv_bot.arxiv_bot_utils as utils import os from getpass import getpass import json -from 
.model_manage import get_model +# from .model_manage import get_model -model = get_model() +# model = get_model() diff --git a/chat/__pycache__/__init__.cpython-311.pyc b/chat/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..585080fd9529f4cd4ba0d1a220db25c44759cda0 Binary files /dev/null and b/chat/__pycache__/__init__.cpython-311.pyc differ diff --git a/chat/__pycache__/apps.cpython-311.pyc b/chat/__pycache__/apps.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8b0344003f9dcd4c195b22961abc0bf9de2f25f Binary files /dev/null and b/chat/__pycache__/apps.cpython-311.pyc differ diff --git a/chat/__pycache__/consumers.cpython-311.pyc b/chat/__pycache__/consumers.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe1b9579b504eb3db0f7dc985e57fc89ef06d240 Binary files /dev/null and b/chat/__pycache__/consumers.cpython-311.pyc differ diff --git a/chat/__pycache__/model_manage.cpython-311.pyc b/chat/__pycache__/model_manage.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c0ba3d63b08d0fd91ffce91c876f765bc150daa Binary files /dev/null and b/chat/__pycache__/model_manage.cpython-311.pyc differ diff --git a/chat/__pycache__/model_manage2.cpython-311.pyc b/chat/__pycache__/model_manage2.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfcb069446e525ea853ed4e7e5d69def70f0e625 Binary files /dev/null and b/chat/__pycache__/model_manage2.cpython-311.pyc differ diff --git a/chat/__pycache__/routing.cpython-311.pyc b/chat/__pycache__/routing.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2003080b1469e7664b5ba17315bc572f79a1d553 Binary files /dev/null and b/chat/__pycache__/routing.cpython-311.pyc differ diff --git a/chat/__pycache__/urls.cpython-311.pyc b/chat/__pycache__/urls.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7cd7f5bf89d564202ccf0907ab8d0f59a3706ca Binary files /dev/null and b/chat/__pycache__/urls.cpython-311.pyc differ diff --git a/chat/__pycache__/views.cpython-311.pyc b/chat/__pycache__/views.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8abc131384723227e945c2fa3be2e2a0fc712cb4 Binary files /dev/null and b/chat/__pycache__/views.cpython-311.pyc differ diff --git a/chat/arxiv_bot/__pycache__/arxiv_bot_utils.cpython-311.pyc b/chat/arxiv_bot/__pycache__/arxiv_bot_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..364883a126a0fbb37d4ce955e8adcb9035b30cc8 Binary files /dev/null and b/chat/arxiv_bot/__pycache__/arxiv_bot_utils.cpython-311.pyc differ diff --git a/chat/arxiv_bot/__pycache__/arxiv_bot_utils2.cpython-311.pyc b/chat/arxiv_bot/__pycache__/arxiv_bot_utils2.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fee7c9f436b9930343d7ff283eea1c2d7e98fd0 Binary files /dev/null and b/chat/arxiv_bot/__pycache__/arxiv_bot_utils2.cpython-311.pyc differ diff --git a/chat/arxiv_bot/arxiv_bot_utils.py b/chat/arxiv_bot/arxiv_bot_utils.py index 8fbf6d11bea7cb2d9f05e2878b32a510004e91ba..a48d2a9b14b0916cd325c6425388c19ae5724ebd 100644 --- a/chat/arxiv_bot/arxiv_bot_utils.py +++ b/chat/arxiv_bot/arxiv_bot_utils.py @@ -1,276 +1,276 @@ -import chromadb -from chromadb import Documents, EmbeddingFunction, Embeddings -from transformers import AutoModel -import json -from numpy.linalg import norm -import sqlite3 -import urllib 
-from django.conf import settings +# import chromadb +# from chromadb import Documents, EmbeddingFunction, Embeddings +# from transformers import AutoModel +# import json +# from numpy.linalg import norm +# import sqlite3 +# import urllib +# from django.conf import settings -# this module act as a singleton class +# # this module act as a singleton class -class JinaAIEmbeddingFunction(EmbeddingFunction): - def __init__(self, model): - super().__init__() - self.model = model +# class JinaAIEmbeddingFunction(EmbeddingFunction): +# def __init__(self, model): +# super().__init__() +# self.model = model - def __call__(self, input: Documents) -> Embeddings: - embeddings = self.model.encode(input) - return embeddings.tolist() +# def __call__(self, input: Documents) -> Embeddings: +# embeddings = self.model.encode(input) +# return embeddings.tolist() -# instance of embedding_model -embedding_model = AutoModel.from_pretrained('jinaai/jina-embeddings-v2-base-en', - trust_remote_code=True, - cache_dir='models') +# # instance of embedding_model +# embedding_model = AutoModel.from_pretrained('jinaai/jina-embeddings-v2-base-en', +# trust_remote_code=True, +# cache_dir='models') -# instance of JinaAIEmbeddingFunction -ef = JinaAIEmbeddingFunction(embedding_model) +# # instance of JinaAIEmbeddingFunction +# ef = JinaAIEmbeddingFunction(embedding_model) -# list of topics -topic_descriptions = json.load(open("topic_descriptions.txt")) -topics = list(dict.keys(topic_descriptions)) -embeddings = [embedding_model.encode(topic_descriptions[key]) for key in topic_descriptions] -cos_sim = lambda a,b: (a @ b.T) / (norm(a)*norm(b)) +# # list of topics +# topic_descriptions = json.load(open("topic_descriptions.txt")) +# topics = list(dict.keys(topic_descriptions)) +# embeddings = [embedding_model.encode(topic_descriptions[key]) for key in topic_descriptions] +# cos_sim = lambda a,b: (a @ b.T) / (norm(a)*norm(b)) -def choose_topic(summary): - embed = embedding_model.encode(summary) - topic = "" - max_sim = 0. - for i,key in enumerate(topics): - sim = cos_sim(embed,embeddings[i]) - if sim > max_sim: - topic = key - max_sim = sim - return topic +# def choose_topic(summary): +# embed = embedding_model.encode(summary) +# topic = "" +# max_sim = 0. +# for i,key in enumerate(topics): +# sim = cos_sim(embed,embeddings[i]) +# if sim > max_sim: +# topic = key +# max_sim = sim +# return topic -def authors_list_to_str(authors): - """input a list of authors, return a string represent authors""" - text = "" - for author in authors: - text+=author+", " - return text[:-3] +# def authors_list_to_str(authors): +# """input a list of authors, return a string represent authors""" +# text = "" +# for author in authors: +# text+=author+", " +# return text[:-3] -def authors_str_to_list(string): - """input a string of authors, return a list of authors""" - authors = [] - list_auth = string.split("and") - for author in list_auth: - if author != "et al.": - authors.append(author.strip()) - return authors +# def authors_str_to_list(string): +# """input a string of authors, return a list of authors""" +# authors = [] +# list_auth = string.split("and") +# for author in list_auth: +# if author != "et al.": +# authors.append(author.strip()) +# return authors -def chunk_texts(text, max_char=400): - """ - Chunk a long text into several chunks, with each chunk about 300-400 characters long, - but make sure no word is cut in half. - Args: - text: The long text to be chunked. - max_char: The maximum number of characters per chunk (default: 400). 
- Returns: - A list of chunks. - """ - chunks = [] - current_chunk = "" - words = text.split() - for word in words: - if len(current_chunk) + len(word) + 1 >= max_char: - chunks.append(current_chunk) - current_chunk = " " - else: - current_chunk += " " + word - chunks.append(current_chunk.strip()) - return chunks +# def chunk_texts(text, max_char=400): +# """ +# Chunk a long text into several chunks, with each chunk about 300-400 characters long, +# but make sure no word is cut in half. +# Args: +# text: The long text to be chunked. +# max_char: The maximum number of characters per chunk (default: 400). +# Returns: +# A list of chunks. +# """ +# chunks = [] +# current_chunk = "" +# words = text.split() +# for word in words: +# if len(current_chunk) + len(word) + 1 >= max_char: +# chunks.append(current_chunk) +# current_chunk = " " +# else: +# current_chunk += " " + word +# chunks.append(current_chunk.strip()) +# return chunks -def trimming(txt): - start = txt.find("{") - end = txt.rfind("}") - return txt[start:end+1].replace("\n"," ") +# def trimming(txt): +# start = txt.find("{") +# end = txt.rfind("}") +# return txt[start:end+1].replace("\n"," ") -# crawl data +# # crawl data -def extract_tag(txt,tagname): - return txt[txt.find("<"+tagname+">")+len(tagname)+2:txt.find("")] +# def extract_tag(txt,tagname): +# return txt[txt.find("<"+tagname+">")+len(tagname)+2:txt.find("")] -def get_record(extract): - id = extract_tag(extract,"id") - updated = extract_tag(extract,"updated") - published = extract_tag(extract,"published") - title = extract_tag(extract,"title").replace("\n ","").strip() - summary = extract_tag(extract,"summary").replace("\n","").strip() - authors = [] - while extract.find("")!=-1: - author = extract_tag(extract,"name") - extract = extract[extract.find("")+9:] - authors.append(author) - pattern = '")!=-1: +# author = extract_tag(extract,"name") +# extract = extract[extract.find("")+9:] +# authors.append(author) +# pattern = '") != -1: - extract = xml[xml.find("")+7:xml.find("")] - xml = xml[xml.find("")+8:] - extract = get_record(extract) - topic = choose_topic(extract[6]) - records.append([topic,*extract]) - return records - except Exception as e: - return "Error: "+str(e) +# def crawl_exact_paper(title,author,max_results=3): +# authors = authors_list_to_str(author) +# records = [] +# url = 'http://export.arxiv.org/api/query?search_query=ti:{title}+AND+au:{author}&max_results={max_results}'.format(title=title,author=authors,max_results=max_results) +# url = url.replace(" ","%20") +# try: +# arxiv_page = urllib.request.urlopen(url,timeout=100).read() +# xml = str(arxiv_page,encoding="utf-8") +# while xml.find("") != -1: +# extract = xml[xml.find("")+7:xml.find("")] +# xml = xml[xml.find("")+8:] +# extract = get_record(extract) +# topic = choose_topic(extract[6]) +# records.append([topic,*extract]) +# return records +# except Exception as e: +# return "Error: "+str(e) -def crawl_arxiv(keyword_list, max_results=100): - baseurl = 'http://export.arxiv.org/api/query?search_query=' - records = [] - for i,keyword in enumerate(keyword_list): - if i ==0: - url = baseurl + 'all:' + keyword - else: - url = url + '+OR+' + 'all:' + keyword - url = url+ '&max_results=' + str(max_results) - url = url.replace(' ', '%20') - try: - arxiv_page = urllib.request.urlopen(url,timeout=100).read() - xml = str(arxiv_page,encoding="utf-8") - while xml.find("") != -1: - extract = xml[xml.find("")+7:xml.find("")] - xml = xml[xml.find("")+8:] - extract = get_record(extract) - topic = 
choose_topic(extract[6]) - records.append([topic,*extract]) - return records - except Exception as e: - return "Error: "+str(e) +# def crawl_arxiv(keyword_list, max_results=100): +# baseurl = 'http://export.arxiv.org/api/query?search_query=' +# records = [] +# for i,keyword in enumerate(keyword_list): +# if i ==0: +# url = baseurl + 'all:' + keyword +# else: +# url = url + '+OR+' + 'all:' + keyword +# url = url+ '&max_results=' + str(max_results) +# url = url.replace(' ', '%20') +# try: +# arxiv_page = urllib.request.urlopen(url,timeout=100).read() +# xml = str(arxiv_page,encoding="utf-8") +# while xml.find("") != -1: +# extract = xml[xml.find("")+7:xml.find("")] +# xml = xml[xml.find("")+8:] +# extract = get_record(extract) +# topic = choose_topic(extract[6]) +# records.append([topic,*extract]) +# return records +# except Exception as e: +# return "Error: "+str(e) -class ArxivSQL: - def __init__(self, table="arxivsql", name="db.sqlite3"): - self.con = sqlite3.connect(name) - self.cur = self.con.cursor() - self.table = table +# class ArxivSQL: +# def __init__(self, table="arxivsql", name="db.sqlite3"): +# self.con = sqlite3.connect(name) +# self.cur = self.con.cursor() +# self.table = table - def query(self, title="", author=[]): - if len(title)>0: - query_title = 'title like "%{}%"'.format(title) - else: - query_title = "True" - if len(author)>0: - query_author = 'authors like ' - for auth in author: - query_author += "'%{}%' or ".format(auth) - query_author = query_author[:-4] - else: - query_author = "True" - query = "select * from {} where {} and {}".format(self.table,query_title,query_author) - result = self.cur.execute(query) - return result.fetchall() +# def query(self, title="", author=[]): +# if len(title)>0: +# query_title = 'title like "%{}%"'.format(title) +# else: +# query_title = "True" +# if len(author)>0: +# query_author = 'authors like ' +# for auth in author: +# query_author += "'%{}%' or ".format(auth) +# query_author = query_author[:-4] +# else: +# query_author = "True" +# query = "select * from {} where {} and {}".format(self.table,query_title,query_author) +# result = self.cur.execute(query) +# return result.fetchall() - def query_id(self, ids=[]): - try: - if len(ids) == 0: - return None - query = "select * from {} where id in (".format(self.table) - for id in ids: - query+="'"+id+"'," - query = query[:-1] + ")" - result = self.cur.execute(query) - return result.fetchall() - except Exception as e: - print(e) - print("Error query: ",query) +# def query_id(self, ids=[]): +# try: +# if len(ids) == 0: +# return None +# query = "select * from {} where id in (".format(self.table) +# for id in ids: +# query+="'"+id+"'," +# query = query[:-1] + ")" +# result = self.cur.execute(query) +# return result.fetchall() +# except Exception as e: +# print(e) +# print("Error query: ",query) - def add(self, crawl_records): - """ - Add crawl_records (list) obtained from arxiv_crawlers - A record is a list of 8 columns: - [topic, id, updated, published, title, author, link, summary] - Return the final length of the database table - """ - results = "" - for record in crawl_records: - try: - query = """insert into arxivsql values("{}","{}","{}","{}","{}","{}","{}")""".format( - record[1][21:], - record[0], - record[4].replace('"',"'"), - authors_list_to_str(record[5]), - record[2][:10], - record[3][:10], - record[6] - ) - self.cur.execute(query) - self.con.commit() - except Exception as e: - result+=str(e) - result+="\n" + query + "\n" - finally: - return results +# def add(self, 
crawl_records): +# """ +# Add crawl_records (list) obtained from arxiv_crawlers +# A record is a list of 8 columns: +# [topic, id, updated, published, title, author, link, summary] +# Return the final length of the database table +# """ +# results = "" +# for record in crawl_records: +# try: +# query = """insert into arxivsql values("{}","{}","{}","{}","{}","{}","{}")""".format( +# record[1][21:], +# record[0], +# record[4].replace('"',"'"), +# authors_list_to_str(record[5]), +# record[2][:10], +# record[3][:10], +# record[6] +# ) +# self.cur.execute(query) +# self.con.commit() +# except Exception as e: +# result+=str(e) +# result+="\n" + query + "\n" +# finally: +# return results -# instance of ArxivSQL -sqldb = ArxivSQL() +# # instance of ArxivSQL +# sqldb = ArxivSQL() -class ArxivChroma: - """ - Create an interface to arxivdb, which only support query and addition. - This interface do not support edition and deletion procedures. - """ - def __init__(self, table="arxiv_records", name="arxivdb/"): - self.client = chromadb.PersistentClient(name) - self.model = embedding_model - self.collection = self.client.get_or_create_collection(table, - embedding_function=JinaAIEmbeddingFunction( - model = self.model - )) +# class ArxivChroma: +# """ +# Create an interface to arxivdb, which only support query and addition. +# This interface do not support edition and deletion procedures. +# """ +# def __init__(self, table="arxiv_records", name="arxivdb/"): +# self.client = chromadb.PersistentClient(name) +# self.model = embedding_model +# self.collection = self.client.get_or_create_collection(table, +# embedding_function=JinaAIEmbeddingFunction( +# model = self.model +# )) - def query_relevant(self, keywords, query_texts, n_results=3): - """ - Perform a query using a list of keywords (str), - or using a relavant string - """ - contains = [] - for keyword in keywords: - contains.append({"$contains":keyword.lower()}) - return self.collection.query( - query_texts=query_texts, - where_document={ - "$or":contains - }, - n_results=n_results, - ) +# def query_relevant(self, keywords, query_texts, n_results=3): +# """ +# Perform a query using a list of keywords (str), +# or using a relavant string +# """ +# contains = [] +# for keyword in keywords: +# contains.append({"$contains":keyword.lower()}) +# return self.collection.query( +# query_texts=query_texts, +# where_document={ +# "$or":contains +# }, +# n_results=n_results, +# ) - def query_exact(self, id): - ids = ["{}_{}".format(id,j) for j in range(0,10)] - return self.collection.get(ids=ids) +# def query_exact(self, id): +# ids = ["{}_{}".format(id,j) for j in range(0,10)] +# return self.collection.get(ids=ids) - def add(self, crawl_records): - """ - Add crawl_records (list) obtained from arxiv_crawlers - A record is a list of 8 columns: - [topic, id, updated, published, title, author, link, summary] - Return the final length of the database table - """ - for record in crawl_records: - embed_text = """ - Topic: {}, - Title: {}, - Summary: {} - """.format(record[0],record[4],record[7]) - chunks = chunk_texts(embed_text) - ids = [record[1][21:]+"_"+str(j) for j in range(len(chunks))] - paper_ids = [{"paper_id":record[1][21:]} for _ in range(len(chunks))] - self.collection.add( - documents = chunks, - metadatas=paper_ids, - ids = ids - ) - return self.collection.count() +# def add(self, crawl_records): +# """ +# Add crawl_records (list) obtained from arxiv_crawlers +# A record is a list of 8 columns: +# [topic, id, updated, published, title, author, link, 
summary] +# Return the final length of the database table +# """ +# for record in crawl_records: +# embed_text = """ +# Topic: {}, +# Title: {}, +# Summary: {} +# """.format(record[0],record[4],record[7]) +# chunks = chunk_texts(embed_text) +# ids = [record[1][21:]+"_"+str(j) for j in range(len(chunks))] +# paper_ids = [{"paper_id":record[1][21:]} for _ in range(len(chunks))] +# self.collection.add( +# documents = chunks, +# metadatas=paper_ids, +# ids = ids +# ) +# return self.collection.count() -# instance of ArxivChroma -db = ArxivChroma() +# # instance of ArxivChroma +# db = ArxivChroma() diff --git a/chat/arxiv_bot/arxiv_bot_utils2.py b/chat/arxiv_bot/arxiv_bot_utils2.py new file mode 100644 index 0000000000000000000000000000000000000000..f98dc7ef4fd2b29afbf69d79d4bdaed57909feee --- /dev/null +++ b/chat/arxiv_bot/arxiv_bot_utils2.py @@ -0,0 +1,297 @@ +import chromadb +from chromadb import Documents, EmbeddingFunction, Embeddings +from transformers import AutoModel +import json +from numpy.linalg import norm +import sqlite3 +import urllib +from django.conf import settings +import Levenshtein + +# this module act as a singleton class + +class JinaAIEmbeddingFunction(EmbeddingFunction): + def __init__(self, model): + super().__init__() + self.model = model + + def __call__(self, input: Documents) -> Embeddings: + embeddings = self.model.encode(input) + return embeddings.tolist() + +# instance of embedding_model +embedding_model = AutoModel.from_pretrained('jinaai/jina-embeddings-v2-base-en', + trust_remote_code=True, + cache_dir='models') + +# instance of JinaAIEmbeddingFunction +ef = JinaAIEmbeddingFunction(embedding_model) + +# list of topics +topic_descriptions = json.load(open("topic_descriptions.txt")) +topics = list(dict.keys(topic_descriptions)) +embeddings = [embedding_model.encode(topic_descriptions[key]) for key in topic_descriptions] +cos_sim = lambda a,b: (a @ b.T) / (norm(a)*norm(b)) + +def lev_sim(a,b): return Levenshtein.distance(a,b) + +def choose_topic(summary): + embed = embedding_model.encode(summary) + topic = "" + max_sim = 0. + for i,key in enumerate(topics): + sim = cos_sim(embed,embeddings[i]) + if sim > max_sim: + topic = key + max_sim = sim + return topic + +def authors_list_to_str(authors): + """input a list of authors, return a string represent authors""" + text = "" + for author in authors: + text+=author+", " + return text[:-3] + +def authors_str_to_list(string): + """input a string of authors, return a list of authors""" + authors = [] + list_auth = string.split("and") + for author in list_auth: + if author != "et al.": + authors.append(author.strip()) + return authors + +def chunk_texts(text, max_char=400): + """ + Chunk a long text into several chunks, with each chunk about 300-400 characters long, + but make sure no word is cut in half. + Args: + text: The long text to be chunked. + max_char: The maximum number of characters per chunk (default: 400). + Returns: + A list of chunks. 
+ """ + chunks = [] + current_chunk = "" + words = text.split() + for word in words: + if len(current_chunk) + len(word) + 1 >= max_char: + chunks.append(current_chunk) + current_chunk = " " + else: + current_chunk += " " + word + chunks.append(current_chunk.strip()) + return chunks + +def trimming(txt): + start = txt.find("{") + end = txt.rfind("}") + return txt[start:end+1].replace("\n"," ") + +# crawl data + +def extract_tag(txt,tagname): + return txt[txt.find("<"+tagname+">")+len(tagname)+2:txt.find("")] + +def get_record(extract): + id = extract_tag(extract,"id") + updated = extract_tag(extract,"updated") + published = extract_tag(extract,"published") + title = extract_tag(extract,"title").replace("\n ","").strip() + summary = extract_tag(extract,"summary").replace("\n","").strip() + authors = [] + while extract.find("")!=-1: + author = extract_tag(extract,"name") + extract = extract[extract.find("")+9:] + authors.append(author) + pattern = '") != -1: + extract = xml[xml.find("")+7:xml.find("")] + xml = xml[xml.find("")+8:] + extract = get_record(extract) + topic = choose_topic(extract[6]) + records.append([topic,*extract]) + return records + except Exception as e: + return "Error: "+str(e) + +def crawl_arxiv(keyword_list, max_results=100): + baseurl = 'http://export.arxiv.org/api/query?search_query=' + records = [] + for i,keyword in enumerate(keyword_list): + if i ==0: + url = baseurl + 'all:' + keyword + else: + url = url + '+OR+' + 'all:' + keyword + url = url+ '&max_results=' + str(max_results) + url = url.replace(' ', '%20') + try: + arxiv_page = urllib.request.urlopen(url,timeout=100).read() + xml = str(arxiv_page,encoding="utf-8") + while xml.find("") != -1: + extract = xml[xml.find("")+7:xml.find("")] + xml = xml[xml.find("")+8:] + extract = get_record(extract) + topic = choose_topic(extract[6]) + records.append([topic,*extract]) + return records + except Exception as e: + return "Error: "+str(e) + +# This class act as a module +class ArxivChroma: + """ + Create an interface to arxivdb, which only support query and addition. + This interface do not support edition and deletion procedures. 
+ """ + client = None + model = None + collection = None + + @staticmethod + def connect(table="arxiv_records", name="arxivdb/"): + ArxivChroma.client = chromadb.PersistentClient(name) + ArxivChroma.model = embedding_model + ArxivChroma.collection = ArxivChroma.client.get_or_create_collection(table, + embedding_function=JinaAIEmbeddingFunction( + model = ArxivChroma.model + )) + + @staticmethod + def query_relevant(keywords, query_texts, n_results=3): + """ + Perform a query using a list of keywords (str), + or using a relavant string + """ + contains = [] + for keyword in keywords: + contains.append({"$contains":keyword.lower()}) + return ArxivChroma.collection.query( + query_texts=query_texts, + where_document={ + "$or":contains + }, + n_results=n_results, + ) + + @staticmethod + def query_exact(id): + ids = ["{}_{}".format(id,j) for j in range(0,10)] + return ArxivChroma.collection.get(ids=ids) + + @staticmethod + def add(crawl_records): + """ + Add crawl_records (list) obtained from arxiv_crawlers + A record is a list of 8 columns: + [topic, id, updated, published, title, author, link, summary] + Return the final length of the database table + """ + for record in crawl_records: + embed_text = """ + Topic: {}, + Title: {}, + Summary: {} + """.format(record[0],record[4],record[7]) + chunks = chunk_texts(embed_text) + ids = [record[1][21:]+"_"+str(j) for j in range(len(chunks))] + paper_ids = [{"paper_id":record[1][21:]} for _ in range(len(chunks))] + ArxivChroma.collection.add( + documents = chunks, + metadatas=paper_ids, + ids = ids + ) + return ArxivChroma.collection.count() + + @staticmethod + def close_connection(): + pass + +# This class act as a module +class ArxivSQL: + table = "arxivsql" + con = None + cur = None + + @staticmethod + def connect(name="db.sqlite3"): + ArxivSQL.con = sqlite3.connect(name, check_same_thread=False) + ArxivSQL.cur = ArxivSQL.con.cursor() + + @staticmethod + def query(title="", author=[], threshold = 15): + if len(author)>0: + query_author= " OR ".join([f"author LIKE '%{a}%'" for a in author]) + else: + query_author= "True" + # Execute the query + query = f"select * from {ArxivSQL.table} where {query_author}" + results = ArxivSQL.cursor.execute(query).fetchall() + if len(title) == 0: + return results + else: + sim_score = {} + for row in results: + row_title = row[2] + row_id = row[0] + score = lev_sim(title, row_title) + if score < threshold: + sim_score[row_id] = score + sorted_results = sorted(sim_score.items(), key=lambda x: x[1]) + return ArxivSQL.query_id(sorted_results) + + @staticmethod + def query_id(ids=[]): + try: + if len(ids) == 0: + return None + query = "select * from {} where id in (".format(ArxivSQL.table) + for id in ids: + query+="'"+id+"'," + query = query[:-1] + ")" + result = ArxivSQL.cur.execute(query) + return result.fetchall() + except Exception as e: + print(e) + print("Error query: ",query) + + @staticmethod + def add(crawl_records): + """ + Add crawl_records (list) obtained from arxiv_crawlers + A record is a list of 8 columns: + [topic, id, updated, published, title, author, link, summary] + Return the final length of the database table + """ + results = "" + for record in crawl_records: + try: + query = """insert into arxivsql values("{}","{}","{}","{}","{}","{}","{}")""".format( + record[1][21:], + record[0], + record[4].replace('"',"'"), + authors_list_to_str(record[5]), + record[2][:10], + record[3][:10], + record[6] + ) + ArxivSQL.cur.execute(query) + ArxivSQL.con.commit() + except Exception as e: + results+=str(e) + 
results+="\n" + query + "\n" + finally: + return results \ No newline at end of file diff --git a/chat/arxiv_bot/prebuild.ipynb b/chat/arxiv_bot/prebuild.ipynb index 146284a83dacbcf6226fca3098918f633f6b9091..eab91ec4eac2dca2789b413fa4ee007104fd6d5c 100644 --- a/chat/arxiv_bot/prebuild.ipynb +++ b/chat/arxiv_bot/prebuild.ipynb @@ -1,354 +1,354 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "d:\\Program\\Anaconda\\envs\\python_project\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n", - "d:\\Program\\Anaconda\\envs\\python_project\\lib\\site-packages\\huggingface_hub\\file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n", - " warnings.warn(\n", - "d:\\Program\\Anaconda\\envs\\python_project\\lib\\site-packages\\huggingface_hub\\file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n", - " warnings.warn(\n" - ] - } - ], - "source": [ - "import google.generativeai as genai\n", - "import arxiv_bot_utils as utils\n", - "import os\n", - "from getpass import getpass\n", - "import json\n", - "#chỉ là import một cách bình thường\n", - "#nội dung là " - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "models/gemini-1.0-pro\n", - "models/gemini-1.0-pro-001\n", - "models/gemini-1.0-pro-latest\n", - "models/gemini-1.0-pro-vision-latest\n", - "models/gemini-1.5-pro-latest\n", - "models/gemini-pro\n", - "models/gemini-pro-vision\n" - ] - } - ], - "source": [ - "os.environ['GEMINI_API_KEY'] = getpass(\"Input your API key: \")\n", - "# gán biến môi trường luôn\n", - "gemini_api_key = os.getenv(\"GEMINI_API_KEY\") # string trong môi trường\n", - "if not gemini_api_key:\n", - " raise ValueError(\n", - " \"Gemini API Key not provided. 
Please provide GEMINI_API_KEY as an environment variable\"\n", - " )\n", - "genai.configure(api_key=gemini_api_key)\n", - "for m in genai.list_models():\n", - " if 'generateContent' in m.supported_generation_methods:\n", - " print(m.name)\n", - " #models nằm trên máy chủ\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "config = genai.GenerationConfig(max_output_tokens=2048,\n", - " temperature=0.7)\n", - "safety_settings = [\n", - " {\n", - " \"category\": \"HARM_CATEGORY_DANGEROUS\",\n", - " \"threshold\": \"BLOCK_NONE\",\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", - " \"threshold\": \"BLOCK_NONE\",\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", - " \"threshold\": \"BLOCK_NONE\",\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", - " \"threshold\": \"BLOCK_NONE\",\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", - " \"threshold\": \"BLOCK_NONE\",\n", - " },\n", - "]\n", - "model = genai.GenerativeModel(\"gemini-pro\",\n", - " generation_config=config,\n", - " safety_settings=safety_settings)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "def extract_keyword_prompt(query):\n", - " \"\"\"A prompt that return a JSON block as arguments for querying database\"\"\"\n", - "\n", - " prompt = (\n", - " \"\"\"[INST] SYSTEM: You are an assistant that choose only one action below based on guest question.\n", - " 1. If the guest question is asking for a single specific document or article with explicit title, you need to respond the information in JSON format with 2 keys \"title\", \"author\" if found any above. The authors are separated with the word 'and'. \n", - " 2. If the guest question is asking for relevant informations about a topic, you need to respond the information in JSON format with 2 keys \"keywords\", \"description\", include a list of keywords represent the main academic topic, \\\n", - " and a description about the main topic. You may paraphrase the keywords to add more. \\\n", - " 3. If the guest is not asking for any informations or documents, you need to respond with a polite answer in JSON format with 1 key \"answer\".\n", - " QUESTION: '{query}'\n", - " [/INST]\n", - " ANSWER: \n", - " \"\"\"\n", - " ).format(query=query)\n", - "\n", - " return prompt\n", - "\n", - "def make_answer_prompt(input, contexts):\n", - " \"\"\"A prompt that return the final answer, based on the queried context\"\"\"\n", - "\n", - " prompt = (\n", - " \"\"\"[INST] You are a library assistant that help to search articles and documents based on user's question.\n", - " From guest's question, you have found some records and documents that may help. Now you need to answer the guest with the information found.\n", - " If no information found in the database, you may generate some other recommendation related to user's question using your own knowledge. 
Each article or paper must have a link to the pdf download page.\n", - " You should answer in a conversational form politely.\n", - " QUESTION: '{input}'\n", - " INFORMATION: '{contexts}'\n", - " [/INST]\n", - " ANSWER:\n", - " \"\"\"\n", - " ).format(input=input, contexts=contexts)\n", - "\n", - " return prompt" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "def response(args):\n", - " \"\"\"Create response context, based on input arguments\"\"\"\n", - " keys = list(dict.keys(args))\n", - " if \"answer\" in keys:\n", - " return args['answer'], None # trả lời trực tiếp\n", - " \n", - " if \"keywords\" in keys:\n", - " # perform query\n", - " query_texts = args[\"description\"]\n", - " keywords = args[\"keywords\"]\n", - " results = utils.db.query_relevant(keywords=keywords, query_texts=query_texts)\n", - " # print(results)\n", - " ids = results['metadatas'][0]\n", - " if len(ids) == 0:\n", - " # go crawl some\n", - " new_records = utils.crawl_arxiv(keyword_list=keywords, max_results=10)\n", - " print(\"Got new records: \",len(new_records))\n", - " if type(new_records) == str:\n", - " return \"Error occured, information not found\", new_records\n", - " utils.db.add(new_records)\n", - " utils.sqldb.add(new_records)\n", - " results = utils.db.query_relevant(keywords=keywords, query_texts=query_texts)\n", - " ids = results['metadatas'][0]\n", - " print(\"Re-queried on chromadb, results: \",ids)\n", - " paper_id = [id['paper_id'] for id in ids]\n", - " paper_info = utils.sqldb.query_id(paper_id)\n", - " print(paper_info)\n", - " records = [] # get title (2), author (3), link (6)\n", - " result_string = \"\"\n", - " if paper_info:\n", - " for i in range(len(paper_info)):\n", - " result_string += \"Title: {}, Author: {}, Link: {}\".format(paper_info[i][2],paper_info[i][3],paper_info[i][6])\n", - " records.append([paper_info[i][2],paper_info[i][3],paper_info[i][6]])\n", - " return result_string, records\n", - " else:\n", - " return \"Information not found\", \"Information not found\"\n", - " # invoke llm and return result\n", - "\n", - " if \"title\" in keys:\n", - " title = args['title']\n", - " authors = utils.authors_str_to_list(args['author'])\n", - " paper_info = utils.sqldb.query(title = title,author = authors)\n", - " # if query not found then go crawl brh\n", - " # print(paper_info)\n", - "\n", - " if len(paper_info) == 0:\n", - " new_records = utils.crawl_exact_paper(title=title,author=authors)\n", - " print(\"Got new records: \",len(new_records))\n", - " if type(new_records) == str:\n", - " # print(new_records)\n", - " return \"Error occured, information not found\", \"Information not found\"\n", - " utils.db.add(new_records)\n", - " utils.sqldb.add(new_records)\n", - " paper_info = utils.sqldb.query(title = title,author = authors)\n", - " print(\"Re-queried on chromadb, results: \",paper_info)\n", - " # -------------------------------------\n", - " records = [] # get title (2), author (3), link (6)\n", - " result_string = \"\"\n", - " for i in range(len(paper_info)):\n", - " result_string += \"Title: {}, Author: {}, Link: {}\".format(paper_info[i][2],paper_info[i][3],paper_info[i][6])\n", - " records.append([paper_info[i][2],paper_info[i][3],paper_info[i][6]])\n", - " # process results:\n", - " if len(result_string) == 0:\n", - " return \"Information not found\", \"Information not found\"\n", - " return result_string, records\n", - " # invoke llm and return result" - ] - }, - { - "cell_type": "code", - "execution_count": 
6, - "metadata": {}, - "outputs": [], - "source": [ - "def full_chain_single_question(input_prompt):\n", - " try:\n", - " first_prompt = extract_keyword_prompt(input_prompt)\n", - " temp_answer = model.generate_content(first_prompt).text\n", - "\n", - " args = json.loads(utils.trimming(temp_answer))\n", - " contexts, results = response(args)\n", - " if not results:\n", - " print(contexts)\n", - " else:\n", - " output_prompt = make_answer_prompt(input_prompt,contexts)\n", - " answer = model.generate_content(output_prompt).text\n", - " return temp_answer, answer\n", - " except Exception as e:\n", - " print(e)\n", - " return temp_answer, \"Error occured: \" + str(e)" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[('1903.04824v1', 'computer science', 'Proceedings of the Fifth International Conference on Cloud and Robotics (ICCR2018)', ' Huaxi, Zhang, Jacques Malenfan', '2019-03-12', '2019-03-12', 'http://arxiv.org/pdf/1903.04824v1'), ('1709.07597v1', 'economics', 'Inverse Reinforcement Learning with Conditional Choice Probabilities', 'Mohit Sharma, Kris M. Kitani, Joachim Groege', '2017-09-22', '2017-09-22', 'http://arxiv.org/pdf/1709.07597v1')]\n", - "Sure, here are some key papers on model predictive control for nonlinear systems:\n", - "\n", - "* **Nonlinear Model Predictive Control: A Survey** by Garcia, P.D., Prett, D.M., and Morari, M. (1989)\n", - "* **Model Predictive Control for Nonlinear Systems** by Camacho, E.F. and Bordons, C. (1999)\n", - "* **Nonlinear Model Predictive Control** by Rawlings, J.B. and Mayne, D.Q. (2009)\n", - "\n", - "As for recent reviews on the application of control theory to robotics, here are a few:\n", - "\n", - "* **Control of Robot Manipulators** by Spong, M.W., Hutchinson, S., and Vidyasagar, M. (2006)\n", - "* **Robotics: Modelling, Planning and Control** by Siciliano, B., Sciavicco, L., Villani, L., and Oriolo, G. (2010)\n", - "* **Control of Robot Arms** by Featherstone, R. (2014)\n", - "\n", - "I hope this information is helpful. Please let me know if you have any other questions.\n" - ] - } - ], - "source": [ - "# test response, second step\n", - "input_prompt = \"Can you suggest some key papers on model predictive control for nonlinear systems, and are there any recent reviews on the application of control theory to robotics?\"\n", - "args = \"{\\n \\\"keywords\\\": [\\\"Model predictive control\\\", \\\"Nonlinear systems\\\", \\\"Robotics\\\", \\\"Control theory\\\"],\\n \\\"description\\\": \\\"Model predictive control (MPC) is a control algorithm that uses a model of the system to predict future behavior and optimize the control inputs. MPC is particularly well-suited for nonlinear systems, as it can handle the complex dynamics of these systems. In recent years, MPC has been increasingly applied to robotics, as it can improve the performance and safety of robotic systems. Control theory is a branch of mathematics that deals with the analysis and design of control systems. 
Control theory has been applied to a wide range of problems in robotics, including motion planning, trajectory tracking, and force control.\\\"\\n}\"\n", - "args = json.loads(args)\n", - "contexts, results = response(args)\n", - "if not results:\n", - " # direct answer\n", - " print(contexts)\n", - "else:\n", - " output_prompt = make_answer_prompt(input_prompt,contexts)\n", - " answer = model.generate_content(output_prompt).text\n", - " print(answer)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'desired': 'Natural Language Processing (Computer Science)', 'question': 'What are some recent papers on deep learning architectures for text classification, and can you recommend any surveys or reviews on the topic?'}\n", - "0\n", - "[('1808.08121v1', 'computer science', 'An Improvement of Data Classification Using Random Multimodel Deep Learning (RMDL)', 'Mojtaba Heidarysafa, Kamran Kowsari, Donald E. Brown, Kiana Jafari Meimandi, Laura E. Barne', '2018-08-23', '2018-08-23', 'http://arxiv.org/pdf/1808.08121v1'), ('1904.08067v5', 'computer science', 'Text Classification Algorithms: A Survey', 'Kamran Kowsari, Kiana Jafari Meimandi, Mojtaba Heidarysafa, Sanjana Mendu, Laura E. Barnes, Donald E. Brow', '2020-05-20', '2019-04-17', 'http://arxiv.org/pdf/1904.08067v5'), ('2202.09144v1', 'computer science', 'Modelling the semantics of text in complex document layouts using graph transformer networks', 'Thomas Roland Barillot, Jacob Saks, Polena Lilyanova, Edward Torgas, Yachen Hu, Yuanqing Liu, Varun Balupuri, Paul Gaskel', '2022-02-18', '2022-02-18', 'http://arxiv.org/pdf/2202.09144v1')]\n", - "1\n", - "[('1601.04187v1', 'computer science', 'Conversion of Artificial Recurrent Neural Networks to Spiking Neural Networks for Low-power Neuromorphic Hardware', 'Peter U. Diehl, Guido Zarrella, Andrew Cassidy, Bruno U. Pedroni, Emre Neftc', '2016-01-16', '2016-01-16', 'http://arxiv.org/pdf/1601.04187v1'), ('1801.01093v3', 'economics', 'Comparing the Forecasting Performances of Linear Models for Electricity Prices with High RES Penetration', 'Angelica Gianfreda, Francesco Ravazzolo, Luca Rossin', '2019-11-12', '2018-01-03', 'http://arxiv.org/pdf/1801.01093v3'), ('2302.11093v1', 'electrical engineering and system science', 'Use Cases for Time-Frequency Image Representations and Deep Learning Techniques for Improved Signal Classification', 'Mehmet Parla', '2023-02-22', '2023-02-22', 'http://arxiv.org/pdf/2302.11093v1')]\n", - "2\n", - "[('1505.07907v4', 'economics', 'Linking Economic Complexity, Institutions and Income Inequality', 'D. Hartmann, M. R. Guevara, C. Jara-Figueroa, M. Aristaran, C. A. 
Hidalg', '2017-01-04', '2015-05-29', 'http://arxiv.org/pdf/1505.07907v4'), ('2107.06855v2', 'economics', 'Comparing Intellectual property policy in the Global North and South -- A one-size-fits-all policy for economic prosperity?', 'S Sidhartha Narayan, Malavika Ranjan, Madhumitha Raghurama', '2021-08-10', '2021-07-14', 'http://arxiv.org/pdf/2107.06855v2'), ('1910.11780v1', 'economics', 'Inequality in Turkey: Looking Beyond Growth', 'Bayram Cakir, Ipek Ergu', '2019-10-25', '2019-10-25', 'http://arxiv.org/pdf/1910.11780v1')]\n", - "3\n", - "[('1607.06583v2', 'computer science', \"Classification of Alzheimer's Disease Structural MRI Data by Deep Learning Convolutional Neural Networks\", 'Saman Sarraf, Ghassem Tofigh', '2017-05-19', '2016-07-22', 'http://arxiv.org/pdf/1607.06583v2'), ('2101.10265v1', 'computer science', 'Superiorities of Deep Extreme Learning Machines against Convolutional Neural Networks', 'Gokhan Altan, Yakup Kutl', '2021-01-21', '2021-01-21', 'http://arxiv.org/pdf/2101.10265v1'), ('2208.03143v1', 'computer science', 'Deep Learning and Health Informatics for Smart Monitoring and Diagnosis', 'Amin Gasm', '2022-08-05', '2022-08-05', 'http://arxiv.org/pdf/2208.03143v1')]\n", - "4\n", - "[('2302.06584v3', 'computer science', 'Thermodynamic AI and the fluctuation frontier', 'Patrick J. Coles, Collin Szczepanski, Denis Melanson, Kaelan Donatella, Antonio J. Martinez, Faris Sbah', '2023-06-13', '2023-02-09', 'http://arxiv.org/pdf/2302.06584v3'), ('2307.12298v1', 'computer science', 'Stabilization and Dissipative Information Transfer of a Superconducting Kerr-Cat Qubit', 'Ufuk Korkmaz, Deniz Türkpenç', '2023-07-23', '2023-07-23', 'http://arxiv.org/pdf/2307.12298v1'), ('2106.10421v1', 'computer science', 'QFCNN: Quantum Fourier Convolutional Neural Network', 'Feihong Shen, Jun Li', '2021-06-19', '2021-06-19', 'http://arxiv.org/pdf/2106.10421v1')]\n", - "5\n", - "[('2308.16539v2', 'computer science', 'On a Connection between Differential Games, Optimal Control, and Energy-based Models for Multi-Agent Interactions', 'Christopher Diehl, Tobias Klosek, Martin Krüger, Nils Murzyn, Torsten Bertra', '2023-10-16', '2023-08-31', 'http://arxiv.org/pdf/2308.16539v2'), ('2404.12474v1', 'computer science', 'Learning a Stable, Safe, Distributed Feedback Controller for a Heterogeneous Platoon of Vehicles', 'Michael H. Shaham, Taskin Padi', '2024-04-18', '2024-04-18', 'http://arxiv.org/pdf/2404.12474v1'), ('2008.13221v1', 'computer science', 'Human-in-the-Loop Methods for Data-Driven and Reinforcement Learning Systems', 'Vinicius G. 
Goeck', '2020-08-30', '2020-08-30', 'http://arxiv.org/pdf/2008.13221v1')]\n", - "6\n", - "[('1911.06206v3', 'economics', 'Bayesian state-space modeling for analyzing heterogeneous network effects of US monetary policy', 'Niko Hauzenberger, Michael Pfarrhofe', '2020-09-10', '2019-11-14', 'http://arxiv.org/pdf/1911.06206v3'), ('2302.14114v1', 'economics', 'Econometric assessment of the monetary policy shocks in Morocco: Evidence from a Bayesian Factor-Augmented VAR', 'Marouane Daou', '2023-02-27', '2023-02-27', 'http://arxiv.org/pdf/2302.14114v1'), ('2311.11858v1', 'economics', 'Theory coherent shrinkage of Time-Varying Parameters in VARs', 'Andrea Renzett', '2023-11-20', '2023-11-20', 'http://arxiv.org/pdf/2311.11858v1')]\n", - "7\n", - "[('2310.03365v2', 'computer science', 'Swin-Tempo: Temporal-Aware Lung Nodule Detection in CT Scans as Video Sequences Using Swin Transformer-Enhanced UNet', 'Hossein Jafari, Karim Faez, Hamidreza Amindava', '2023-10-14', '2023-10-05', 'http://arxiv.org/pdf/2310.03365v2'), ('1808.08531v1', 'computer science', 'DeepTracker: Visualizing the Training Process of Convolutional Neural Networks', 'Dongyu Liu, Weiwei Cui, Kai Jin, Yuxiao Guo, Huamin Q', '2018-08-26', '2018-08-26', 'http://arxiv.org/pdf/1808.08531v1'), ('2105.10448v1', 'computer science', 'Distinguishing artefacts: evaluating the saturation point of convolutional neural networks', 'Ric Real, James Gopsill, David Jones, Chris Snider, Ben Hick', '2021-05-21', '2021-05-21', 'http://arxiv.org/pdf/2105.10448v1')]\n", - "8\n", - "Got new records: 10\n", - "Re-queried on chromadb, results: []\n", - "None\n", - "9\n", - "[('2403.07017v1', 'computer science', 'Mathematics of multi-agent learning systems at the interface of game theory and artificial intelligence', 'Long Wang, Feng Fu, Xingru Che', '2024-03-09', '2024-03-09', 'http://arxiv.org/pdf/2403.07017v1'), ('2210.02205v1', 'computer science', 'Game Theoretic Rating in N-player general-sum games with Equilibria', 'Luke Marris, Marc Lanctot, Ian Gemp, Shayegan Omidshafiei, Stephen McAleer, Jerome Connor, Karl Tuyls, Thore Graepe', '2022-10-05', '2022-10-05', 'http://arxiv.org/pdf/2210.02205v1'), ('2212.05357v3', 'economics', 'On Blockchain We Cooperate: An Evolutionary Game Perspective', 'Luyao Zhang, Xinyu Tia', '2023-01-19', '2022-12-10', 'http://arxiv.org/pdf/2212.05357v3')]\n" - ] - } - ], - "source": [ - "with open(\"test_questions.txt\",\"r\") as infile:\n", - " data = json.load(infile)\n", - "print(data[0])\n", - "\n", - "test_log = []\n", - "for i,t in enumerate(data):\n", - " print(i)\n", - " temp_answer, answer = full_chain_single_question(t['question'])\n", - " test_log.append({'desired topic':t['desired'],\n", - " 'question':t['question'],\n", - " 'first answer':temp_answer,\n", - " 'final answer':answer})\n", - "with open(\"test_results.json\",\"w\") as outfile:\n", - " json.dump(test_log,outfile)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"d:\\Program\\Anaconda\\envs\\python_project\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n", + "d:\\Program\\Anaconda\\envs\\python_project\\lib\\site-packages\\huggingface_hub\\file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n", + " warnings.warn(\n", + "d:\\Program\\Anaconda\\envs\\python_project\\lib\\site-packages\\huggingface_hub\\file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "import google.generativeai as genai\n", + "import arxiv_bot_utils as utils\n", + "import os\n", + "from getpass import getpass\n", + "import json\n", + "#chỉ là import một cách bình thường\n", + "#nội dung là " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "models/gemini-1.0-pro\n", + "models/gemini-1.0-pro-001\n", + "models/gemini-1.0-pro-latest\n", + "models/gemini-1.0-pro-vision-latest\n", + "models/gemini-1.5-pro-latest\n", + "models/gemini-pro\n", + "models/gemini-pro-vision\n" + ] + } + ], + "source": [ + "os.environ['GEMINI_API_KEY'] = getpass(\"Input your API key: \")\n", + "# gán biến môi trường luôn\n", + "gemini_api_key = os.getenv(\"GEMINI_API_KEY\") # string trong môi trường\n", + "if not gemini_api_key:\n", + " raise ValueError(\n", + " \"Gemini API Key not provided. Please provide GEMINI_API_KEY as an environment variable\"\n", + " )\n", + "genai.configure(api_key=gemini_api_key)\n", + "for m in genai.list_models():\n", + " if 'generateContent' in m.supported_generation_methods:\n", + " print(m.name)\n", + " #models nằm trên máy chủ\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "config = genai.GenerationConfig(max_output_tokens=2048,\n", + " temperature=0.7)\n", + "safety_settings = [\n", + " {\n", + " \"category\": \"HARM_CATEGORY_DANGEROUS\",\n", + " \"threshold\": \"BLOCK_NONE\",\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", + " \"threshold\": \"BLOCK_NONE\",\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", + " \"threshold\": \"BLOCK_NONE\",\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", + " \"threshold\": \"BLOCK_NONE\",\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", + " \"threshold\": \"BLOCK_NONE\",\n", + " },\n", + "]\n", + "model = genai.GenerativeModel(\"gemini-pro\",\n", + " generation_config=config,\n", + " safety_settings=safety_settings)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_keyword_prompt(query):\n", + " \"\"\"A prompt that return a JSON block as arguments for querying database\"\"\"\n", + "\n", + " prompt = (\n", + " \"\"\"[INST] SYSTEM: You are an assistant that choose only one action below based on guest question.\n", + " 1. 
If the guest question is asking for a single specific document or article with an explicit title, you need to respond with the information in JSON format with 2 keys \"title\", \"author\" if any are found. The authors are separated with the word 'and'. \n", + " 2. If the guest question is asking for relevant information about a topic, you need to respond with the information in JSON format with 2 keys \"keywords\", \"description\": include a list of keywords that represent the main academic topic, \\\n", + " and a description of the main topic. You may paraphrase the keywords to add more. \\\n", + " 3. If the guest is not asking for any information or documents, you need to respond with a polite answer in JSON format with 1 key \"answer\".\n", + " QUESTION: '{query}'\n", + " [/INST]\n", + " ANSWER: \n", + " \"\"\"\n", + " ).format(query=query)\n", + "\n", + " return prompt\n", + "\n", + "def make_answer_prompt(input, contexts):\n", + " \"\"\"A prompt that returns the final answer, based on the queried context\"\"\"\n", + "\n", + " prompt = (\n", + " \"\"\"[INST] You are a library assistant that helps to search for articles and documents based on the user's question.\n", + " From the guest's question, you have found some records and documents that may help. Now you need to answer the guest with the information found.\n", + " If no information is found in the database, you may generate some other recommendations related to the user's question using your own knowledge. Each article or paper must have a link to the pdf download page.\n", + " You should answer politely, in a conversational form.\n", + " QUESTION: '{input}'\n", + " INFORMATION: '{contexts}'\n", + " [/INST]\n", + " ANSWER:\n", + " \"\"\"\n", + " ).format(input=input, contexts=contexts)\n", + "\n", + " return prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def response(args):\n", + " \"\"\"Create response context, based on input arguments\"\"\"\n", + " keys = list(dict.keys(args))\n", + " if \"answer\" in keys:\n", + " return args['answer'], None # answer directly\n", + " \n", + " if \"keywords\" in keys:\n", + " # perform query\n", + " query_texts = args[\"description\"]\n", + " keywords = args[\"keywords\"]\n", + " results = utils.db.query_relevant(keywords=keywords, query_texts=query_texts)\n", + " # print(results)\n", + " ids = results['metadatas'][0]\n", + " if len(ids) == 0:\n", + " # go crawl some\n", + " new_records = utils.crawl_arxiv(keyword_list=keywords, max_results=10)\n", + " print(\"Got new records: \",len(new_records))\n", + " if type(new_records) == str:\n", + " return \"Error occurred, information not found\", new_records\n", + " utils.db.add(new_records)\n", + " utils.sqldb.add(new_records)\n", + " results = utils.db.query_relevant(keywords=keywords, query_texts=query_texts)\n", + " ids = results['metadatas'][0]\n", + " print(\"Re-queried on chromadb, results: \",ids)\n", + " paper_id = [id['paper_id'] for id in ids]\n", + " paper_info = utils.sqldb.query_id(paper_id)\n", + " print(paper_info)\n", + " records = [] # get title (2), author (3), link (6)\n", + " result_string = \"\"\n", + " if paper_info:\n", + " for i in range(len(paper_info)):\n", + " result_string += \"Title: {}, Author: {}, Link: {}\".format(paper_info[i][2],paper_info[i][3],paper_info[i][6])\n", + " records.append([paper_info[i][2],paper_info[i][3],paper_info[i][6]])\n", + " return result_string, records\n", + " else:\n", + " return \"Information not found\", \"Information not found\"\n", + " # invoke llm 
and return result\n", + "\n", + " if \"title\" in keys:\n", + " title = args['title']\n", + " authors = utils.authors_str_to_list(args['author'])\n", + " paper_info = utils.sqldb.query(title = title,author = authors)\n", + " # if query not found then go crawl brh\n", + " # print(paper_info)\n", + "\n", + " if len(paper_info) == 0:\n", + " new_records = utils.crawl_exact_paper(title=title,author=authors)\n", + " print(\"Got new records: \",len(new_records))\n", + " if type(new_records) == str:\n", + " # print(new_records)\n", + " return \"Error occured, information not found\", \"Information not found\"\n", + " utils.db.add(new_records)\n", + " utils.sqldb.add(new_records)\n", + " paper_info = utils.sqldb.query(title = title,author = authors)\n", + " print(\"Re-queried on chromadb, results: \",paper_info)\n", + " # -------------------------------------\n", + " records = [] # get title (2), author (3), link (6)\n", + " result_string = \"\"\n", + " for i in range(len(paper_info)):\n", + " result_string += \"Title: {}, Author: {}, Link: {}\".format(paper_info[i][2],paper_info[i][3],paper_info[i][6])\n", + " records.append([paper_info[i][2],paper_info[i][3],paper_info[i][6]])\n", + " # process results:\n", + " if len(result_string) == 0:\n", + " return \"Information not found\", \"Information not found\"\n", + " return result_string, records\n", + " # invoke llm and return result" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def full_chain_single_question(input_prompt):\n", + " try:\n", + " first_prompt = extract_keyword_prompt(input_prompt)\n", + " temp_answer = model.generate_content(first_prompt).text\n", + "\n", + " args = json.loads(utils.trimming(temp_answer))\n", + " contexts, results = response(args)\n", + " if not results:\n", + " print(contexts)\n", + " else:\n", + " output_prompt = make_answer_prompt(input_prompt,contexts)\n", + " answer = model.generate_content(output_prompt).text\n", + " return temp_answer, answer\n", + " except Exception as e:\n", + " print(e)\n", + " return temp_answer, \"Error occured: \" + str(e)" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[('1903.04824v1', 'computer science', 'Proceedings of the Fifth International Conference on Cloud and Robotics (ICCR2018)', ' Huaxi, Zhang, Jacques Malenfan', '2019-03-12', '2019-03-12', 'http://arxiv.org/pdf/1903.04824v1'), ('1709.07597v1', 'economics', 'Inverse Reinforcement Learning with Conditional Choice Probabilities', 'Mohit Sharma, Kris M. Kitani, Joachim Groege', '2017-09-22', '2017-09-22', 'http://arxiv.org/pdf/1709.07597v1')]\n", + "Sure, here are some key papers on model predictive control for nonlinear systems:\n", + "\n", + "* **Nonlinear Model Predictive Control: A Survey** by Garcia, P.D., Prett, D.M., and Morari, M. (1989)\n", + "* **Model Predictive Control for Nonlinear Systems** by Camacho, E.F. and Bordons, C. (1999)\n", + "* **Nonlinear Model Predictive Control** by Rawlings, J.B. and Mayne, D.Q. (2009)\n", + "\n", + "As for recent reviews on the application of control theory to robotics, here are a few:\n", + "\n", + "* **Control of Robot Manipulators** by Spong, M.W., Hutchinson, S., and Vidyasagar, M. (2006)\n", + "* **Robotics: Modelling, Planning and Control** by Siciliano, B., Sciavicco, L., Villani, L., and Oriolo, G. (2010)\n", + "* **Control of Robot Arms** by Featherstone, R. 
(2014)\n", + "\n", + "I hope this information is helpful. Please let me know if you have any other questions.\n" + ] + } + ], + "source": [ + "# test response, second step\n", + "input_prompt = \"Can you suggest some key papers on model predictive control for nonlinear systems, and are there any recent reviews on the application of control theory to robotics?\"\n", + "args = \"{\\n \\\"keywords\\\": [\\\"Model predictive control\\\", \\\"Nonlinear systems\\\", \\\"Robotics\\\", \\\"Control theory\\\"],\\n \\\"description\\\": \\\"Model predictive control (MPC) is a control algorithm that uses a model of the system to predict future behavior and optimize the control inputs. MPC is particularly well-suited for nonlinear systems, as it can handle the complex dynamics of these systems. In recent years, MPC has been increasingly applied to robotics, as it can improve the performance and safety of robotic systems. Control theory is a branch of mathematics that deals with the analysis and design of control systems. Control theory has been applied to a wide range of problems in robotics, including motion planning, trajectory tracking, and force control.\\\"\\n}\"\n", + "args = json.loads(args)\n", + "contexts, results = response(args)\n", + "if not results:\n", + " # direct answer\n", + " print(contexts)\n", + "else:\n", + " output_prompt = make_answer_prompt(input_prompt,contexts)\n", + " answer = model.generate_content(output_prompt).text\n", + " print(answer)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'desired': 'Natural Language Processing (Computer Science)', 'question': 'What are some recent papers on deep learning architectures for text classification, and can you recommend any surveys or reviews on the topic?'}\n", + "0\n", + "[('1808.08121v1', 'computer science', 'An Improvement of Data Classification Using Random Multimodel Deep Learning (RMDL)', 'Mojtaba Heidarysafa, Kamran Kowsari, Donald E. Brown, Kiana Jafari Meimandi, Laura E. Barne', '2018-08-23', '2018-08-23', 'http://arxiv.org/pdf/1808.08121v1'), ('1904.08067v5', 'computer science', 'Text Classification Algorithms: A Survey', 'Kamran Kowsari, Kiana Jafari Meimandi, Mojtaba Heidarysafa, Sanjana Mendu, Laura E. Barnes, Donald E. Brow', '2020-05-20', '2019-04-17', 'http://arxiv.org/pdf/1904.08067v5'), ('2202.09144v1', 'computer science', 'Modelling the semantics of text in complex document layouts using graph transformer networks', 'Thomas Roland Barillot, Jacob Saks, Polena Lilyanova, Edward Torgas, Yachen Hu, Yuanqing Liu, Varun Balupuri, Paul Gaskel', '2022-02-18', '2022-02-18', 'http://arxiv.org/pdf/2202.09144v1')]\n", + "1\n", + "[('1601.04187v1', 'computer science', 'Conversion of Artificial Recurrent Neural Networks to Spiking Neural Networks for Low-power Neuromorphic Hardware', 'Peter U. Diehl, Guido Zarrella, Andrew Cassidy, Bruno U. 
Pedroni, Emre Neftc', '2016-01-16', '2016-01-16', 'http://arxiv.org/pdf/1601.04187v1'), ('1801.01093v3', 'economics', 'Comparing the Forecasting Performances of Linear Models for Electricity Prices with High RES Penetration', 'Angelica Gianfreda, Francesco Ravazzolo, Luca Rossin', '2019-11-12', '2018-01-03', 'http://arxiv.org/pdf/1801.01093v3'), ('2302.11093v1', 'electrical engineering and system science', 'Use Cases for Time-Frequency Image Representations and Deep Learning Techniques for Improved Signal Classification', 'Mehmet Parla', '2023-02-22', '2023-02-22', 'http://arxiv.org/pdf/2302.11093v1')]\n", + "2\n", + "[('1505.07907v4', 'economics', 'Linking Economic Complexity, Institutions and Income Inequality', 'D. Hartmann, M. R. Guevara, C. Jara-Figueroa, M. Aristaran, C. A. Hidalg', '2017-01-04', '2015-05-29', 'http://arxiv.org/pdf/1505.07907v4'), ('2107.06855v2', 'economics', 'Comparing Intellectual property policy in the Global North and South -- A one-size-fits-all policy for economic prosperity?', 'S Sidhartha Narayan, Malavika Ranjan, Madhumitha Raghurama', '2021-08-10', '2021-07-14', 'http://arxiv.org/pdf/2107.06855v2'), ('1910.11780v1', 'economics', 'Inequality in Turkey: Looking Beyond Growth', 'Bayram Cakir, Ipek Ergu', '2019-10-25', '2019-10-25', 'http://arxiv.org/pdf/1910.11780v1')]\n", + "3\n", + "[('1607.06583v2', 'computer science', \"Classification of Alzheimer's Disease Structural MRI Data by Deep Learning Convolutional Neural Networks\", 'Saman Sarraf, Ghassem Tofigh', '2017-05-19', '2016-07-22', 'http://arxiv.org/pdf/1607.06583v2'), ('2101.10265v1', 'computer science', 'Superiorities of Deep Extreme Learning Machines against Convolutional Neural Networks', 'Gokhan Altan, Yakup Kutl', '2021-01-21', '2021-01-21', 'http://arxiv.org/pdf/2101.10265v1'), ('2208.03143v1', 'computer science', 'Deep Learning and Health Informatics for Smart Monitoring and Diagnosis', 'Amin Gasm', '2022-08-05', '2022-08-05', 'http://arxiv.org/pdf/2208.03143v1')]\n", + "4\n", + "[('2302.06584v3', 'computer science', 'Thermodynamic AI and the fluctuation frontier', 'Patrick J. Coles, Collin Szczepanski, Denis Melanson, Kaelan Donatella, Antonio J. Martinez, Faris Sbah', '2023-06-13', '2023-02-09', 'http://arxiv.org/pdf/2302.06584v3'), ('2307.12298v1', 'computer science', 'Stabilization and Dissipative Information Transfer of a Superconducting Kerr-Cat Qubit', 'Ufuk Korkmaz, Deniz Türkpenç', '2023-07-23', '2023-07-23', 'http://arxiv.org/pdf/2307.12298v1'), ('2106.10421v1', 'computer science', 'QFCNN: Quantum Fourier Convolutional Neural Network', 'Feihong Shen, Jun Li', '2021-06-19', '2021-06-19', 'http://arxiv.org/pdf/2106.10421v1')]\n", + "5\n", + "[('2308.16539v2', 'computer science', 'On a Connection between Differential Games, Optimal Control, and Energy-based Models for Multi-Agent Interactions', 'Christopher Diehl, Tobias Klosek, Martin Krüger, Nils Murzyn, Torsten Bertra', '2023-10-16', '2023-08-31', 'http://arxiv.org/pdf/2308.16539v2'), ('2404.12474v1', 'computer science', 'Learning a Stable, Safe, Distributed Feedback Controller for a Heterogeneous Platoon of Vehicles', 'Michael H. Shaham, Taskin Padi', '2024-04-18', '2024-04-18', 'http://arxiv.org/pdf/2404.12474v1'), ('2008.13221v1', 'computer science', 'Human-in-the-Loop Methods for Data-Driven and Reinforcement Learning Systems', 'Vinicius G. 
Goeck', '2020-08-30', '2020-08-30', 'http://arxiv.org/pdf/2008.13221v1')]\n", + "6\n", + "[('1911.06206v3', 'economics', 'Bayesian state-space modeling for analyzing heterogeneous network effects of US monetary policy', 'Niko Hauzenberger, Michael Pfarrhofe', '2020-09-10', '2019-11-14', 'http://arxiv.org/pdf/1911.06206v3'), ('2302.14114v1', 'economics', 'Econometric assessment of the monetary policy shocks in Morocco: Evidence from a Bayesian Factor-Augmented VAR', 'Marouane Daou', '2023-02-27', '2023-02-27', 'http://arxiv.org/pdf/2302.14114v1'), ('2311.11858v1', 'economics', 'Theory coherent shrinkage of Time-Varying Parameters in VARs', 'Andrea Renzett', '2023-11-20', '2023-11-20', 'http://arxiv.org/pdf/2311.11858v1')]\n", + "7\n", + "[('2310.03365v2', 'computer science', 'Swin-Tempo: Temporal-Aware Lung Nodule Detection in CT Scans as Video Sequences Using Swin Transformer-Enhanced UNet', 'Hossein Jafari, Karim Faez, Hamidreza Amindava', '2023-10-14', '2023-10-05', 'http://arxiv.org/pdf/2310.03365v2'), ('1808.08531v1', 'computer science', 'DeepTracker: Visualizing the Training Process of Convolutional Neural Networks', 'Dongyu Liu, Weiwei Cui, Kai Jin, Yuxiao Guo, Huamin Q', '2018-08-26', '2018-08-26', 'http://arxiv.org/pdf/1808.08531v1'), ('2105.10448v1', 'computer science', 'Distinguishing artefacts: evaluating the saturation point of convolutional neural networks', 'Ric Real, James Gopsill, David Jones, Chris Snider, Ben Hick', '2021-05-21', '2021-05-21', 'http://arxiv.org/pdf/2105.10448v1')]\n", + "8\n", + "Got new records: 10\n", + "Re-queried on chromadb, results: []\n", + "None\n", + "9\n", + "[('2403.07017v1', 'computer science', 'Mathematics of multi-agent learning systems at the interface of game theory and artificial intelligence', 'Long Wang, Feng Fu, Xingru Che', '2024-03-09', '2024-03-09', 'http://arxiv.org/pdf/2403.07017v1'), ('2210.02205v1', 'computer science', 'Game Theoretic Rating in N-player general-sum games with Equilibria', 'Luke Marris, Marc Lanctot, Ian Gemp, Shayegan Omidshafiei, Stephen McAleer, Jerome Connor, Karl Tuyls, Thore Graepe', '2022-10-05', '2022-10-05', 'http://arxiv.org/pdf/2210.02205v1'), ('2212.05357v3', 'economics', 'On Blockchain We Cooperate: An Evolutionary Game Perspective', 'Luyao Zhang, Xinyu Tia', '2023-01-19', '2022-12-10', 'http://arxiv.org/pdf/2212.05357v3')]\n" + ] + } + ], + "source": [ + "with open(\"test_questions.txt\",\"r\") as infile:\n", + " data = json.load(infile)\n", + "print(data[0])\n", + "\n", + "test_log = []\n", + "for i,t in enumerate(data):\n", + " print(i)\n", + " temp_answer, answer = full_chain_single_question(t['question'])\n", + " test_log.append({'desired topic':t['desired'],\n", + " 'question':t['question'],\n", + " 'first answer':temp_answer,\n", + " 'final answer':answer})\n", + "with open(\"test_results.json\",\"w\") as outfile:\n", + " json.dump(test_log,outfile)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/chat/consumers.py b/chat/consumers.py index 955a35c90e10abd92da77d808f6bc24c2793e329..4e6c2108976837dbaf96858d446c8526a89b9bf9 100644 --- a/chat/consumers.py +++ b/chat/consumers.py @@ -1,21 +1,25 @@ import json 
-from . import model_manage as md -from chat.arxiv_bot.arxiv_bot_utils import ArxivSQL +from . import model_manage2 as md from channels.generic.websocket import WebsocketConsumer class ChatConsumer(WebsocketConsumer): def connect(self): self.accept() - self.db_instance = ArxivSQL() + self.model, self.session = md.init_model("auto") def disconnect(self, close_code): + del self.model, self.session pass + def receive(self, text_data): text_data_json = json.loads(text_data) message = text_data_json["messages"] print(message) - record, messagee = md.full_chain_history_question(message, self.db_instance) - print("First answer: ",record) - self.send(text_data=json.dumps({"message": messagee})) + question = message[-1]['content'] + response, history_state = md.full_chain_history_question(question, self.session, mode="auto") + # print("First answer: ",response) + print("Session history:") + md.print_history(history_state) + self.send(text_data=json.dumps({"message": response})) \ No newline at end of file diff --git a/chat/migrations/__pycache__/0001_initial.cpython-311.pyc b/chat/migrations/__pycache__/0001_initial.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70bcd13fd246d448fb42ef32e1caadace34f5f35 Binary files /dev/null and b/chat/migrations/__pycache__/0001_initial.cpython-311.pyc differ diff --git a/chat/migrations/__pycache__/__init__.cpython-311.pyc b/chat/migrations/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c95c655be736931f2af9102d8dadac43ca1575c1 Binary files /dev/null and b/chat/migrations/__pycache__/__init__.cpython-311.pyc differ diff --git a/chat/model_manage.py b/chat/model_manage.py index 6c8e2a8dfd3688faf749df5bb3c70e8636224293..f496d4fa512771f18ab8fee9aaabc807f8957a2c 100644 --- a/chat/model_manage.py +++ b/chat/model_manage.py @@ -1,271 +1,271 @@ -# my_app/model_manager.py -import google.generativeai as genai -import chat.arxiv_bot.arxiv_bot_utils as utils -import json +# # my_app/model_manager.py +# import google.generativeai as genai +# import chat.arxiv_bot.arxiv_bot_utils as utils +# import json -model = None +# model = None -model_retrieval = None +# model_retrieval = None -model_answer = None +# model_answer = None -RETRIEVAL_INSTRUCT = """You are an auto chatbot that response with only one action below based on user question. - 1. If the guest question is asking about a science topic, you need to respond the information in JSON schema below: - { - "keywords": [a list of string keywords about the topic], - "description": "a paragraph describing the topic in about 50 to 100 words" - } - 2. If the guest is not asking for any informations or documents, you need to respond in JSON schema below: - { - "answer": "your answer to the user question" - }""" +# RETRIEVAL_INSTRUCT = """You are an auto chatbot that response with only one action below based on user question. +# 1. If the guest question is asking about a science topic, you need to respond the information in JSON schema below: +# { +# "keywords": [a list of string keywords about the topic], +# "description": "a paragraph describing the topic in about 50 to 100 words" +# } +# 2. If the guest is not asking for any informations or documents, you need to respond in JSON schema below: +# { +# "answer": "your answer to the user question" +# }""" -ANSWER_INSTRUCT = """You are a library assistant that help answering customer question based on the information given. 
- You always answer in a conversational form naturally and politely. - You must introduce all the records given, each must contain title, authors and the link to the pdf file.""" +# ANSWER_INSTRUCT = """You are a library assistant that help answering customer question based on the information given. +# You always answer in a conversational form naturally and politely. +# You must introduce all the records given, each must contain title, authors and the link to the pdf file.""" -def create_model(): - with open("apikey.txt","r") as apikey: - key = apikey.readline() - genai.configure(api_key=key) - for m in genai.list_models(): - if 'generateContent' in m.supported_generation_methods: - print(m.name) - print("He was there") - config = genai.GenerationConfig(max_output_tokens=2048, - temperature=1.0) - safety_settings = [ - { - "category": "HARM_CATEGORY_DANGEROUS", - "threshold": "BLOCK_NONE", - }, - { - "category": "HARM_CATEGORY_HARASSMENT", - "threshold": "BLOCK_NONE", - }, - { - "category": "HARM_CATEGORY_HATE_SPEECH", - "threshold": "BLOCK_NONE", - }, - { - "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", - "threshold": "BLOCK_NONE", - }, - { - "category": "HARM_CATEGORY_DANGEROUS_CONTENT", - "threshold": "BLOCK_NONE", - }, - ] - global model, model_retrieval, model_answer - model = genai.GenerativeModel("gemini-1.5-pro-latest", - generation_config=config, - safety_settings=safety_settings) - model_retrieval = genai.GenerativeModel("gemini-1.5-pro-latest", - generation_config=config, - safety_settings=safety_settings, - system_instruction=RETRIEVAL_INSTRUCT) - model_answer = genai.GenerativeModel("gemini-1.5-pro-latest", - generation_config=config, - safety_settings=safety_settings, - system_instruction=ANSWER_INSTRUCT) - return model, model_answer, model_retrieval +# def create_model(): +# with open("apikey.txt","r") as apikey: +# key = apikey.readline() +# genai.configure(api_key=key) +# for m in genai.list_models(): +# if 'generateContent' in m.supported_generation_methods: +# print(m.name) +# print("He was there") +# config = genai.GenerationConfig(max_output_tokens=2048, +# temperature=1.0) +# safety_settings = [ +# { +# "category": "HARM_CATEGORY_DANGEROUS", +# "threshold": "BLOCK_NONE", +# }, +# { +# "category": "HARM_CATEGORY_HARASSMENT", +# "threshold": "BLOCK_NONE", +# }, +# { +# "category": "HARM_CATEGORY_HATE_SPEECH", +# "threshold": "BLOCK_NONE", +# }, +# { +# "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", +# "threshold": "BLOCK_NONE", +# }, +# { +# "category": "HARM_CATEGORY_DANGEROUS_CONTENT", +# "threshold": "BLOCK_NONE", +# }, +# ] +# global model, model_retrieval, model_answer +# model = genai.GenerativeModel("gemini-1.5-pro-latest", +# generation_config=config, +# safety_settings=safety_settings) +# model_retrieval = genai.GenerativeModel("gemini-1.5-pro-latest", +# generation_config=config, +# safety_settings=safety_settings, +# system_instruction=RETRIEVAL_INSTRUCT) +# model_answer = genai.GenerativeModel("gemini-1.5-pro-latest", +# generation_config=config, +# safety_settings=safety_settings, +# system_instruction=ANSWER_INSTRUCT) +# return model, model_answer, model_retrieval -def get_model(): - global model, model_answer, model_retrieval - if model is None: - # Khởi tạo model ở đây - model, model_answer, model_retrieval = create_model() # Giả sử create_model là hàm tạo model của bạn - return model, model_answer, model_retrieval +# def get_model(): +# global model, model_answer, model_retrieval +# if model is None: +# # Khởi tạo model ở đây +# model, model_answer, 
model_retrieval = create_model() # Giả sử create_model là hàm tạo model của bạn +# return model, model_answer, model_retrieval -def extract_keyword_prompt(query): - """A prompt that return a JSON block as arguments for querying database""" +# def extract_keyword_prompt(query): +# """A prompt that return a JSON block as arguments for querying database""" - prompt = """[INST] SYSTEM: You are an auto chatbot that response with only one action below based on user question. - 1. If the guest question is asking about a science topic, you need to respond the information in JSON schema below: - { - "keywords": [a list of string keywords about the topic], - "description": "a paragraph describing the topic in about 50 to 100 words" - } - 2. If the guest is not asking for any informations or documents, you need to respond in JSON schema below: - { - "answer": "your answer to the user question" - } - QUESTION: """ + query + """[/INST] - ANSWER: """ - return prompt +# prompt = """[INST] SYSTEM: You are an auto chatbot that response with only one action below based on user question. +# 1. If the guest question is asking about a science topic, you need to respond the information in JSON schema below: +# { +# "keywords": [a list of string keywords about the topic], +# "description": "a paragraph describing the topic in about 50 to 100 words" +# } +# 2. If the guest is not asking for any informations or documents, you need to respond in JSON schema below: +# { +# "answer": "your answer to the user question" +# } +# QUESTION: """ + query + """[/INST] +# ANSWER: """ +# return prompt -def make_answer_prompt(input, contexts): - """A prompt that return the final answer, based on the queried context""" +# def make_answer_prompt(input, contexts): +# """A prompt that return the final answer, based on the queried context""" - prompt = ( - """[INST] You are a library assistant that help answering customer QUESTION based on the INFORMATION given. - You always answer in a conversational form naturally and politely. - You must introduce all the records given, each must contain title, authors and the link to the pdf file. - QUESTION: {input} - INFORMATION: '{contexts}' - [/INST] - ANSWER: - """ - ).format(input=input, contexts=contexts) - return prompt +# prompt = ( +# """[INST] You are a library assistant that help answering customer QUESTION based on the INFORMATION given. +# You always answer in a conversational form naturally and politely. +# You must introduce all the records given, each must contain title, authors and the link to the pdf file. 
+# QUESTION: {input} +# INFORMATION: '{contexts}' +# [/INST] +# ANSWER: +# """ +# ).format(input=input, contexts=contexts) +# return prompt -def retrieval_chat_template(question): - return { - "role":"user", - "parts":[f"QUESTION: {question} \n ANSWER:"] - } +# def retrieval_chat_template(question): +# return { +# "role":"user", +# "parts":[f"QUESTION: {question} \n ANSWER:"] +# } -def answer_chat_template(question, contexts): - return { - "role":"user", - "parts":[f"QUESTION: {question} \n INFORMATION: {contexts} \n ANSWER:"] - } +# def answer_chat_template(question, contexts): +# return { +# "role":"user", +# "parts":[f"QUESTION: {question} \n INFORMATION: {contexts} \n ANSWER:"] +# } -def response(args, db_instance): - """Create response context, based on input arguments""" - keys = list(dict.keys(args)) - if "answer" in keys: - return args['answer'], None # trả lời trực tiếp +# def response(args, db_instance): +# """Create response context, based on input arguments""" +# keys = list(dict.keys(args)) +# if "answer" in keys: +# return args['answer'], None # trả lời trực tiếp - if "keywords" in keys: - # perform query - query_texts = args["description"] - keywords = args["keywords"] - results = utils.db.query_relevant(keywords=keywords, query_texts=query_texts) - # print(results) - ids = results['metadatas'][0] - if len(ids) == 0: - # go crawl some - new_records = utils.crawl_arxiv(keyword_list=keywords, max_results=10) - print("Got new records: ",len(new_records)) - if type(new_records) == str: - return "Error occured, information not found", new_records - utils.db.add(new_records) - db_instance.add(new_records) - results = utils.db.query_relevant(keywords=keywords, query_texts=query_texts) - ids = results['metadatas'][0] - print("Re-queried on chromadb, results: ",ids) - paper_id = [id['paper_id'] for id in ids] - paper_info = db_instance.query_id(paper_id) - print(paper_info) - records = [] # get title (2), author (3), link (6) - result_string = "" - if paper_info: - for i in range(len(paper_info)): - result_string += "Record no.{} - Title: {}, Author: {}, Link: {}, ".format(i+1,paper_info[i][2],paper_info[i][3],paper_info[i][6]) - id = paper_info[i][0] - selected_document = utils.db.query_exact(id)["documents"] - doc_str = "Summary:" - for doc in selected_document: - doc_str+= doc + " " - result_string += doc_str - records.append([paper_info[i][2],paper_info[i][3],paper_info[i][6]]) - return result_string, records - else: - return "Information not found", "Information not found" - # invoke llm and return result - - # if "title" in keys: - # title = args['title'] - # authors = utils.authors_str_to_list(args['author']) - # paper_info = db_instance.query(title = title,author = authors) - # # if query not found then go crawl brh - # # print(paper_info) - - # if len(paper_info) == 0: - # new_records = utils.crawl_exact_paper(title=title,author=authors) - # print("Got new records: ",len(new_records)) - # if type(new_records) == str: - # # print(new_records) - # return "Error occured, information not found", "Information not found" - # utils.db.add(new_records) - # db_instance.add(new_records) - # paper_info = db_instance.query(title = title,author = authors) - # print("Re-queried on chromadb, results: ",paper_info) - # # ------------------------------------- - # records = [] # get title (2), author (3), link (6) - # result_string = "" - # for i in range(len(paper_info)): - # result_string += "Title: {}, Author: {}, Link: {}".format(paper_info[i][2],paper_info[i][3],paper_info[i][6]) - # 
records.append([paper_info[i][2],paper_info[i][3],paper_info[i][6]]) - # # process results: - # if len(result_string) == 0: - # return "Information not found", "Information not found" - # return result_string, records - # invoke llm and return result - -def full_chain_single_question(input_prompt, db_instance): - try: - first_prompt = extract_keyword_prompt(input_prompt) - temp_answer = model.generate_content(first_prompt).text +# if "keywords" in keys: +# # perform query +# query_texts = args["description"] +# keywords = args["keywords"] +# results = utils.db.query_relevant(keywords=keywords, query_texts=query_texts) +# # print(results) +# ids = results['metadatas'][0] +# if len(ids) == 0: +# # go crawl some +# new_records = utils.crawl_arxiv(keyword_list=keywords, max_results=10) +# print("Got new records: ",len(new_records)) +# if type(new_records) == str: +# return "Error occured, information not found", new_records +# utils.db.add(new_records) +# db_instance.add(new_records) +# results = utils.db.query_relevant(keywords=keywords, query_texts=query_texts) +# ids = results['metadatas'][0] +# print("Re-queried on chromadb, results: ",ids) +# paper_id = [id['paper_id'] for id in ids] +# paper_info = db_instance.query_id(paper_id) +# print(paper_info) +# records = [] # get title (2), author (3), link (6) +# result_string = "" +# if paper_info: +# for i in range(len(paper_info)): +# result_string += "Record no.{} - Title: {}, Author: {}, Link: {}, ".format(i+1,paper_info[i][2],paper_info[i][3],paper_info[i][6]) +# id = paper_info[i][0] +# selected_document = utils.db.query_exact(id)["documents"] +# doc_str = "Summary:" +# for doc in selected_document: +# doc_str+= doc + " " +# result_string += doc_str +# records.append([paper_info[i][2],paper_info[i][3],paper_info[i][6]]) +# return result_string, records +# else: +# return "Information not found", "Information not found" +# # invoke llm and return result - args = json.loads(utils.trimming(temp_answer)) - contexts, results = response(args, db_instance) - if not results: - # print(contexts) - return "Random question, direct return", contexts - else: - output_prompt = make_answer_prompt(input_prompt,contexts) - answer = model.generate_content(output_prompt).text - return temp_answer, answer - except Exception as e: - # print(e) - return temp_answer, "Error occured: " + str(e) - +# # if "title" in keys: +# # title = args['title'] +# # authors = utils.authors_str_to_list(args['author']) +# # paper_info = db_instance.query(title = title,author = authors) +# # # if query not found then go crawl brh +# # # print(paper_info) -def format_chat_history_from_web(chat_history: list): - temp_chat = [] - for message in chat_history: - temp_chat.append( - { - "role": message["role"], - "parts": [message["content"]] - } - ) - return temp_chat +# # if len(paper_info) == 0: +# # new_records = utils.crawl_exact_paper(title=title,author=authors) +# # print("Got new records: ",len(new_records)) +# # if type(new_records) == str: +# # # print(new_records) +# # return "Error occured, information not found", "Information not found" +# # utils.db.add(new_records) +# # db_instance.add(new_records) +# # paper_info = db_instance.query(title = title,author = authors) +# # print("Re-queried on chromadb, results: ",paper_info) +# # # ------------------------------------- +# # records = [] # get title (2), author (3), link (6) +# # result_string = "" +# # for i in range(len(paper_info)): +# # result_string += "Title: {}, Author: {}, Link: 
{}".format(paper_info[i][2],paper_info[i][3],paper_info[i][6]) +# # records.append([paper_info[i][2],paper_info[i][3],paper_info[i][6]]) +# # # process results: +# # if len(result_string) == 0: +# # return "Information not found", "Information not found" +# # return result_string, records +# # invoke llm and return result -# def full_chain_history_question(chat_history: list, db_instance): +# def full_chain_single_question(input_prompt, db_instance): # try: -# temp_chat = format_chat_history_from_web(chat_history) -# print('Extracted temp chat: ',temp_chat) -# first_prompt = extract_keyword_prompt(temp_chat[-1]["parts"][0]) +# first_prompt = extract_keyword_prompt(input_prompt) # temp_answer = model.generate_content(first_prompt).text # args = json.loads(utils.trimming(temp_answer)) # contexts, results = response(args, db_instance) -# print('Context extracted: ',contexts) # if not results: +# # print(contexts) # return "Random question, direct return", contexts # else: -# QA_Prompt = make_answer_prompt(temp_chat[-1]["parts"][0], contexts) -# temp_chat[-1]["parts"] = QA_Prompt -# print(temp_chat) -# answer = model.generate_content(temp_chat).text +# output_prompt = make_answer_prompt(input_prompt,contexts) +# answer = model.generate_content(output_prompt).text # return temp_answer, answer # except Exception as e: # # print(e) # return temp_answer, "Error occured: " + str(e) + + +# def format_chat_history_from_web(chat_history: list): +# temp_chat = [] +# for message in chat_history: +# temp_chat.append( +# { +# "role": message["role"], +# "parts": [message["content"]] +# } +# ) +# return temp_chat + +# # def full_chain_history_question(chat_history: list, db_instance): +# # try: +# # temp_chat = format_chat_history_from_web(chat_history) +# # print('Extracted temp chat: ',temp_chat) +# # first_prompt = extract_keyword_prompt(temp_chat[-1]["parts"][0]) +# # temp_answer = model.generate_content(first_prompt).text -def full_chain_history_question(chat_history: list, db_instance): - try: - temp_chat = format_chat_history_from_web(chat_history) - question = temp_chat[-1]['parts'][0] - first_answer = model_retrieval.generate_content(temp_chat).text +# # args = json.loads(utils.trimming(temp_answer)) +# # contexts, results = response(args, db_instance) +# # print('Context extracted: ',contexts) +# # if not results: +# # return "Random question, direct return", contexts +# # else: +# # QA_Prompt = make_answer_prompt(temp_chat[-1]["parts"][0], contexts) +# # temp_chat[-1]["parts"] = QA_Prompt +# # print(temp_chat) +# # answer = model.generate_content(temp_chat).text +# # return temp_answer, answer +# # except Exception as e: +# # # print(e) +# # return temp_answer, "Error occured: " + str(e) + +# def full_chain_history_question(chat_history: list, db_instance): +# try: +# temp_chat = format_chat_history_from_web(chat_history) +# question = temp_chat[-1]['parts'][0] +# first_answer = model_retrieval.generate_content(temp_chat).text - print(first_answer) - args = json.loads(utils.trimming(first_answer)) +# print(first_answer) +# args = json.loads(utils.trimming(first_answer)) - contexts, results = response(args, db_instance) - if not results: - return "Random question, direct return", contexts - else: - print('Context to answers: ',contexts) - answer_chat = answer_chat_template(question, contexts) - temp_chat[-1] = answer_chat - answer = model_answer.generate_content(temp_chat).text - return first_answer, answer - except Exception as e: - if first_answer: - return first_answer, "Error occured: " + 
str(e) - else: - return "No answer", "Error occured: " + str(e) \ No newline at end of file +# contexts, results = response(args, db_instance) +# if not results: +# return "Random question, direct return", contexts +# else: +# print('Context to answers: ',contexts) +# answer_chat = answer_chat_template(question, contexts) +# temp_chat[-1] = answer_chat +# answer = model_answer.generate_content(temp_chat).text +# return first_answer, answer +# except Exception as e: +# if first_answer: +# return first_answer, "Error occured: " + str(e) +# else: +# return "No answer", "Error occured: " + str(e) \ No newline at end of file diff --git a/chat/model_manage2.py b/chat/model_manage2.py new file mode 100644 index 0000000000000000000000000000000000000000..57300306248b0c3e3754a802d823710b94fe7b49 --- /dev/null +++ b/chat/model_manage2.py @@ -0,0 +1,174 @@ +import chat.arxiv_bot.arxiv_bot_utils2 as utils +import google.generativeai as genai +import json +import os +from google.generativeai.types import content_types +from collections.abc import Iterable +from IPython import display +from IPython.display import Markdown + +# ----------------------- define instructions ----------------------- +system_instruction = """You are a library chatbot that helps people find relevant articles about a topic, or find a specific article with a given title and authors. +Your job is to analyze the user question, generate the parameters needed from it, and use the tools that are given to you. +Also, after the function call is done, you must post-process the results into a more conversational form, providing some explanation about each paper based on its summary to avoid recitation. +You must provide the link to its Arxiv pdf page.""" + +# --------------------------- define tools -------------------------- +def search_for_relevant_article(keywords: list[str], topic_description: str) -> str: + """This tool is used to search the database for articles relevant to a topic, using a list of at least 3 keywords and a one-sentence topic description. + If the question does not provide 3 keywords, the model must generate more keywords related to the topic. + If there is no description of the topic, the model must generate a description for the function call. + \nThe result is a string describing the records found in the database: 'Record no. - Title: <title>, Author: <authors>, Link: <link to the pdf file>, Summary: <summary of the article>'. There can be many records.
+ \nIf the result is 'Information not found' it means some error has occurred, or the database has no relevant article""" + + print('Keywords: {}, description: {}'.format(keywords,topic_description)) + + results = utils.ArxivChroma.query_relevant(keywords=keywords, query_texts=topic_description) + # print(results) + ids = results['metadatas'][0] + if len(ids) == 0: + # go crawl some + new_records = utils.crawl_arxiv(keyword_list=keywords, max_results=10) + # print("Got new records: ",len(new_records)) + if type(new_records) == str: + return "Information not found" + + utils.ArxivChroma.add(new_records) + utils.ArxivSQL.add(new_records) + results = utils.ArxivChroma.query_relevant(keywords=keywords, query_texts=topic_description) + ids = results['metadatas'][0] + # print("Re-queried on chromadb, results: ",ids) + + paper_id = [id['paper_id'] for id in ids] + paper_info = utils.ArxivSQL.query_id(paper_id) + # print(paper_info) + records = [] # get title (2), author (3), link (6) + result_string = "" + if paper_info: + for i in range(len(paper_info)): + result_string += "Record no.{} - Title: {}, Author: {}, Link: {}, ".format(i+1,paper_info[i][2],paper_info[i][3],paper_info[i][6]) + id = paper_info[i][0] + selected_document = utils.ArxivChroma.query_exact(id)["documents"] + doc_str = "Summary:" + for doc in selected_document: + doc_str+= doc + " " + result_string += doc_str + records.append([paper_info[i][2],paper_info[i][3],paper_info[i][6]]) + return result_string + else: + return "Information not found" + +def search_for_specific_article(title: str, authors: list[str]) -> str: + """This tool is used to search the database for a specific article, given its title and authors. + \nThe result is a string describing the records found in the database: 'Record no. - Title: <title>, Author: <authors>, Link: <link to the pdf file>, Summary: <summary of the article>'. There can be many records. + \nIf the result is 'Information not found' it means some error has occurred, or the database has no relevant article""" + + print('Title: {}, authors: {}'.format(title,authors)) + + paper_info = utils.ArxivSQL.query(title = title,author = authors) + if len(paper_info) == 0: + new_records = utils.crawl_exact_paper(title=title,author=authors) + # print("Got new records: ",len(new_records)) + if type(new_records) == str: + # print(new_records) + return "Information not found" + utils.ArxivChroma.add(new_records) + utils.ArxivSQL.add(new_records) + paper_info = utils.ArxivSQL.query(title = title,author = authors) + # print("Re-queried on chromadb, results: ",paper_info) + # ------------------------------------- + records = [] # get title (2), author (3), link (6) + result_string = "" + if paper_info: + for i in range(len(paper_info)): + result_string += "Record no.{} - Title: {}, Author: {}, Link: {}, ".format(i+1,paper_info[i][2],paper_info[i][3],paper_info[i][6]) + id = paper_info[i][0] + selected_document = utils.ArxivChroma.query_exact(id)["documents"] + doc_str = "Summary:" + for doc in selected_document: + doc_str+= doc + " " + result_string += doc_str + records.append([paper_info[i][2],paper_info[i][3],paper_info[i][6]]) + # process results: + if len(result_string) == 0: + return "Information not found" + return result_string + +def answer_others_questions(question: str) -> str: + """This tool is the default option for other questions that are not related to an article or paper request. The model will respond to the question with its own answer.""" + return question
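+
+# A minimal smoke-test sketch for the tools above, assuming the Chroma and SQL
+# stores wrapped by chat.arxiv_bot.arxiv_bot_utils2 are connected and populated;
+# the keywords and description below are made-up examples. Uncomment to call the
+# relevance search directly, without going through the Gemini model:
+# print(search_for_relevant_article(
+#     keywords=["model predictive control", "nonlinear systems", "robotics"],
+#     topic_description="Model predictive control methods for nonlinear robotic systems."))
+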
+tools = [search_for_relevant_article, search_for_specific_article, answer_others_questions] +tools_name = ['search_for_relevant_article', 'search_for_specific_article', 'answer_others_questions'] + +# load key, prepare config ------------------------ +with open("apikey.txt","r") as apikey: + key = apikey.readline() +genai.configure(api_key=key) +generation_config = { + "temperature": 1, + "top_p": 1, + "top_k": 0, + "max_output_tokens": 2048, + "response_mime_type": "text/plain", +} +safety_settings = [ + { + "category": "HARM_CATEGORY_DANGEROUS", + "threshold": "BLOCK_NONE", + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "threshold": "BLOCK_NONE", + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "threshold": "BLOCK_NONE", + }, + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "threshold": "BLOCK_NONE", + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "threshold": "BLOCK_NONE", + }, +] +# this function returns a tool_config with mode 'none', 'any', 'auto' +def tool_config_from_mode(mode: str, fns: Iterable[str] = ()): + """Create a tool config with the specified function calling mode.""" + return content_types.to_tool_config( + {"function_calling_config": {"mode": mode, "allowed_function_names": fns}} + ) + +def init_model(mode = "auto"): + # return an instance of a model, holding its own ChatSession + # every socket session holds its own model + # this function must be called upon socket init, also start_chat() to begin the chat + model = genai.GenerativeModel(model_name="gemini-1.5-pro-latest", + safety_settings=safety_settings, + generation_config=generation_config, + tools=tools, + tool_config=tool_config_from_mode(mode), + system_instruction=system_instruction) + chat_instance = model.start_chat(enable_automatic_function_calling=True) + return model, chat_instance + +# handle tool calls and the chat session +def full_chain_history_question(user_input, chat_instance: genai.ChatSession, mode="auto"): + try: + response = chat_instance.send_message(user_input,tool_config=tool_config_from_mode(mode)).text + return response, chat_instance.history + except Exception as e: + print(e) + return f'Error occurred during call: {e}', chat_instance.history + +# for printing the session log +def print_history(history): + for content in history: + part = content.parts[0] + print(content.role, "->", type(part).to_dict(part)) + print('-'*80) + +utils.ArxivChroma.connect() +utils.ArxivSQL.connect() \ No newline at end of file diff --git a/chatbot_django/__pycache__/__init__.cpython-311.pyc b/chatbot_django/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4515c4ccada06d1af33a67c62ab9b4174696bf75 Binary files /dev/null and b/chatbot_django/__pycache__/__init__.cpython-311.pyc differ diff --git a/chatbot_django/__pycache__/asgi.cpython-311.pyc b/chatbot_django/__pycache__/asgi.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ace63c118712aa669a9e843440e6ad39ca8d26d Binary files /dev/null and b/chatbot_django/__pycache__/asgi.cpython-311.pyc differ diff --git a/chatbot_django/__pycache__/settings.cpython-311.pyc b/chatbot_django/__pycache__/settings.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7314fb480ea51ce07ff52795e8469f3a24be6a9 Binary files /dev/null and b/chatbot_django/__pycache__/settings.cpython-311.pyc differ diff --git a/chatbot_django/__pycache__/urls.cpython-311.pyc 
b/chatbot_django/__pycache__/urls.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc57b88675d4c3ad4639af0acc8e4c7ff942750a Binary files /dev/null and b/chatbot_django/__pycache__/urls.cpython-311.pyc differ diff --git a/concat.txt b/concat.txt new file mode 100644 index 0000000000000000000000000000000000000000..866d4a058b34749b6498751e6cb073c3b0879cbe Binary files /dev/null and b/concat.txt differ diff --git a/db.sqlite3 b/db.sqlite3 index b3c0fabb06f6be8930ce2be04f65be8c86db7cb4..f27c51d09db170e76e6b7912cf2eb8087e6c202e 100644 Binary files a/db.sqlite3 and b/db.sqlite3 differ diff --git a/models/models--jinaai--jina-bert-implementation/blobs/64b6ce6fe4477c320b0ab303e2f26ae98beae1f7 b/models/models--jinaai--jina-bert-implementation/blobs/64b6ce6fe4477c320b0ab303e2f26ae98beae1f7 new file mode 100644 index 0000000000000000000000000000000000000000..64b6ce6fe4477c320b0ab303e2f26ae98beae1f7 --- /dev/null +++ b/models/models--jinaai--jina-bert-implementation/blobs/64b6ce6fe4477c320b0ab303e2f26ae98beae1f7 @@ -0,0 +1,2357 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023 Jina AI GmbH. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""PyTorch BERT model.""" + + +import math +import os +import warnings +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union +import numpy as np + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_bert import JinaBertConfig + +# Torch implementation +try: + from torch.nn.functional import scaled_dot_product_attention +except ImportError: + scaled_dot_product_attention = None + +# This is used by encode but user may not have it installed +try: + from tqdm.autonotebook import trange + + has_tqdm = True +except ImportError: + has_tqdm = False + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "bert-base-uncased" +_CONFIG_FOR_DOC = "JinaBertConfig" + +# TokenClassification docstring +_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = ( + "dbmdz/bert-large-cased-finetuned-conll03-english" +) +_TOKEN_CLASS_EXPECTED_OUTPUT = "['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC'] " +_TOKEN_CLASS_EXPECTED_LOSS = 0.01 + +# QuestionAnswering docstring +_CHECKPOINT_FOR_QA = "deepset/bert-base-cased-squad2" +_QA_EXPECTED_OUTPUT = "'a nice puppet'" +_QA_EXPECTED_LOSS = 7.41 +_QA_TARGET_START_INDEX = 14 +_QA_TARGET_END_INDEX = 15 + +# SequenceClassification docstring +_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "textattack/bert-base-uncased-yelp-polarity" +_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'" +_SEQ_CLASS_EXPECTED_LOSS = 0.01 + + +def load_tf_weights_in_bert(model, config, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." 
+        )
+        raise
+    tf_path = os.path.abspath(tf_checkpoint_path)
+    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+    # Load weights from TF model
+    init_vars = tf.train.list_variables(tf_path)
+    names = []
+    arrays = []
+    for name, shape in init_vars:
+        logger.info(f"Loading TF weight {name} with shape {shape}")
+        array = tf.train.load_variable(tf_path, name)
+        names.append(name)
+        arrays.append(array)
+
+    for name, array in zip(names, arrays):
+        name = name.split("/")
+        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+        # which are not required for using the pretrained model
+        if any(
+            n
+            in [
+                "adam_v",
+                "adam_m",
+                "AdamWeightDecayOptimizer",
+                "AdamWeightDecayOptimizer_1",
+                "global_step",
+            ]
+            for n in name
+        ):
+            logger.info(f"Skipping {'/'.join(name)}")
+            continue
+        pointer = model
+        for m_name in name:
+            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
+                scope_names = re.split(r"_(\d+)", m_name)
+            else:
+                scope_names = [m_name]
+            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
+                pointer = getattr(pointer, "weight")
+            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
+                pointer = getattr(pointer, "bias")
+            elif scope_names[0] == "output_weights":
+                pointer = getattr(pointer, "weight")
+            elif scope_names[0] == "squad":
+                pointer = getattr(pointer, "classifier")
+            else:
+                try:
+                    pointer = getattr(pointer, scope_names[0])
+                except AttributeError:
+                    logger.info(f"Skipping {'/'.join(name)}")
+                    continue
+            if len(scope_names) >= 2:
+                num = int(scope_names[1])
+                pointer = pointer[num]
+        if m_name[-11:] == "_embeddings":
+            pointer = getattr(pointer, "weight")
+        elif m_name == "kernel":
+            array = np.transpose(array)
+        try:
+            if pointer.shape != array.shape:
+                raise ValueError(
+                    f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
+                )
+        except ValueError as e:
+            e.args += (pointer.shape, array.shape)
+            raise
+        logger.info(f"Initialize PyTorch weight {name}")
+        pointer.data = torch.from_numpy(array)
+    return model
+
+
+class JinaBertEmbeddings(nn.Module):
+    """Construct the embeddings from word, position and token_type embeddings."""
+
+    def __init__(self, config: JinaBertConfig):
+        super().__init__()
+        self.word_embeddings = nn.Embedding(
+            config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
+        )
+        if config.position_embedding_type != "alibi":
+            self.position_embeddings = nn.Embedding(
+                config.max_position_embeddings, config.hidden_size
+            )
+        self.token_type_embeddings = nn.Embedding(
+            config.type_vocab_size, config.hidden_size
+        )
+
+        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+        # any TensorFlow checkpoint file
+        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+        self.position_embedding_type = getattr(
+            config, "position_embedding_type", "absolute"
+        )
+        self.register_buffer(
+            "position_ids",
+            torch.arange(config.max_position_embeddings).expand((1, -1)),
+            persistent=False,
+        )
+        self.register_buffer(
+            "token_type_ids",
+            torch.zeros(self.position_ids.size(), dtype=torch.long),
+            persistent=False,
+        )
+
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+
past_key_values_length: int = 0, + ) -> torch.Tensor: + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[ + :, past_key_values_length : seq_length + past_key_values_length + ] + + # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand( + input_shape[0], seq_length + ) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros( + input_shape, dtype=torch.long, device=self.position_ids.device + ) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class JinaBertSelfAttention(nn.Module): + def __init__(self, config: JinaBertConfig, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr( + config, "embedding_size" + ): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.attn_implementation = config.attn_implementation + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout_p = config.attention_probs_dropout_prob + self.dropout = nn.Dropout(self.dropout_p) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if ( + self.position_embedding_type == "relative_key" + or self.position_embedding_type == "relative_key_query" + ): + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding( + 2 * config.max_position_embeddings - 1, self.attention_head_size + ) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + ( + self.num_attention_heads, + self.attention_head_size, + ) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + bias: Optional[torch.FloatTensor] = None, + ) -> Tuple[torch.Tensor]: + 
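
Review note: `transpose_for_scores` above is the standard multi-head reshape from the flat projection to per-head blocks. A minimal standalone sketch of the shape round-trip (sizes are hypothetical, not taken from this config):

    import torch

    batch, seq, n_heads, head_size = 2, 5, 12, 64
    hidden = torch.randn(batch, seq, n_heads * head_size)

    # (batch, seq, all_head_size) -> (batch, n_heads, seq, head_size)
    per_head = hidden.view(batch, seq, n_heads, head_size).permute(0, 2, 1, 3)
    assert per_head.shape == (2, 12, 5, 64)

    # inverse reshape, as applied to the attention output later in forward()
    restored = per_head.permute(0, 2, 1, 3).contiguous().view(batch, seq, -1)
    assert torch.equal(restored, hidden)
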
mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + if self.attn_implementation == 'torch' and scaled_dot_product_attention is not None: + b, _, s, _ = query_layer.shape + new_bias = attention_mask + bias + dropout_p = self.dropout_p if self.training else 0.0 + attn = scaled_dot_product_attention(query_layer, key_layer, value_layer, new_bias, dropout_p=dropout_p) + attn = attn.permute(0, 2, 1, 3).contiguous() + return (attn.view(b, s, self.all_head_size),) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
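
Review note: the fast path above folds the padding mask and the ALiBi bias into a single additive `attn_mask` for `scaled_dot_product_attention`. A hedged sketch of why that matches the manual computation that follows (assumes PyTorch >= 2.0, dropout disabled, made-up sizes; in training the module additionally applies `dropout_p`):

    import torch
    import torch.nn.functional as F

    b, h, s, d = 1, 8, 16, 64
    q, k, v = torch.randn(3, b, h, s, d).unbind(0)
    bias = torch.randn(b, h, s, s)  # attention_mask + alibi, pre-summed

    # fused kernel: softmax(QK^T / sqrt(d) + bias) V
    fast = F.scaled_dot_product_attention(q, k, v, attn_mask=bias)

    # manual path, as in the code below
    scores = q @ k.transpose(-1, -2) / d**0.5 + bias
    manual = torch.softmax(scores, dim=-1) @ v
    assert torch.allclose(fast, manual, atol=1e-5)
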
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if ( + self.position_embedding_type == "relative_key" + or self.position_embedding_type == "relative_key_query" + ): + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor( + key_length - 1, dtype=torch.long, device=hidden_states.device + ).view(-1, 1) + else: + position_ids_l = torch.arange( + query_length, dtype=torch.long, device=hidden_states.device + ).view(-1, 1) + position_ids_r = torch.arange( + key_length, dtype=torch.long, device=hidden_states.device + ).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding( + distance + self.max_position_embeddings - 1 + ) + positional_embedding = positional_embedding.to( + dtype=query_layer.dtype + ) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum( + "bhld,lrd->bhlr", query_layer, positional_embedding + ) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum( + "bhld,lrd->bhlr", query_layer, positional_embedding + ) + relative_position_scores_key = torch.einsum( + "bhrd,lrd->bhlr", key_layer, positional_embedding + ) + attention_scores = ( + attention_scores + + relative_position_scores_query + + relative_position_scores_key + ) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores + bias, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = ( + (context_layer, attention_probs) if output_attentions else (context_layer,) + ) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +class JinaBertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward( + self, hidden_states: torch.Tensor, input_tensor: torch.Tensor + ) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class JinaBertAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = JinaBertSelfAttention( + config, position_embedding_type=position_embedding_type + ) + self.output = JinaBertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, + self.self.num_attention_heads, + self.self.attention_head_size, + self.pruned_heads, + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = ( + self.self.attention_head_size * self.self.num_attention_heads + ) + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + bias: Optional[torch.FloatTensor] = None, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + bias, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[ + 1: + ] # add attentions if we output them + return outputs + + +class JinaBertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + 
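
Review note: `prune_heads` earlier in this hunk drops whole attention heads by slicing the Q/K/V projections row-wise and `output.dense` column-wise (`dim=1`). A small sketch using the same `transformers.pytorch_utils` helpers imported at the top of this file (sizes are hypothetical; assumes the utilities behave as in recent transformers releases):

    import torch
    from transformers.pytorch_utils import (
        find_pruneable_heads_and_indices,
        prune_linear_layer,
    )

    n_heads, head_size = 12, 64
    heads, index = find_pruneable_heads_and_indices({0, 2}, n_heads, head_size, set())
    layer = torch.nn.Linear(n_heads * head_size, n_heads * head_size)
    pruned = prune_linear_layer(layer, index)  # keeps the 10 surviving heads' rows
    assert pruned.weight.shape == (640, 768)
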
return hidden_states + + +class JinaBertOutput(nn.Module): + def __init__(self, config: JinaBertConfig): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward( + self, hidden_states: torch.Tensor, input_tensor: torch.Tensor + ) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class JinaBertGLUMLP(nn.Module): + def __init__(self, config: JinaBertConfig): + super().__init__() + self.config = config + self.gated_layers = nn.Linear( + config.hidden_size, config.intermediate_size * 2, bias=False + ) + if config.feed_forward_type == 'reglu': + self.act = nn.ReLU() + elif config.feed_forward_type == 'geglu': + self.act = nn.GELU() + else: + raise ValueError( + f"feed_forward_type {config.feed_forward_type} not supported" + ) + self.wo = nn.Linear(config.intermediate_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + residual_connection = hidden_states + # compute the activation + hidden_states = self.gated_layers(hidden_states) + gated = hidden_states[:, :, : self.config.intermediate_size] + non_gated = hidden_states[:, :, self.config.intermediate_size :] + hidden_states = self.act(gated) * non_gated + hidden_states = self.dropout(hidden_states) + # multiply by the second matrix + hidden_states = self.wo(hidden_states) + # add the residual connection and post-LN + hidden_states = self.layernorm(hidden_states + residual_connection) + return hidden_states + + +class JinaBertLayer(nn.Module): + def __init__(self, config: JinaBertConfig): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = JinaBertAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + self.feed_forward_type = config.feed_forward_type + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError( + f"{self} should be used as a decoder model if cross attention is added" + ) + self.crossattention = JinaBertAttention( + config, position_embedding_type="absolute" + ) + if self.feed_forward_type.endswith('glu'): + self.mlp = JinaBertGLUMLP(config) + else: + self.intermediate = JinaBertIntermediate(config) + self.output = JinaBertOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + bias: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = ( + past_key_value[:2] if past_key_value is not None else None + ) + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + bias=bias, + ) + attention_output 
= self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[ + 1: + ] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = ( + past_key_value[-2:] if past_key_value is not None else None + ) + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = ( + outputs + cross_attention_outputs[1:-1] + ) # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + if self.feed_forward_type.endswith('glu'): + layer_output = self.mlp(attention_output) + else: + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output, + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class JinaBertEncoder(nn.Module): + def __init__(self, config: JinaBertConfig): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [JinaBertLayer(config) for _ in range(config.num_hidden_layers)] + ) + self.gradient_checkpointing = False + self.num_attention_heads = config.num_attention_heads + self.register_buffer( + "alibi", + self.rebuild_alibi_tensor(size=config.max_position_embeddings), + persistent=False, + ) + + def rebuild_alibi_tensor( + self, size: int, device: Optional[Union[torch.device, str]] = None + ): + # Alibi + # Following https://github.com/ofirpress/attention_with_linear_biases/issues/5 (Implementation 1) + # In the causal case, you can exploit the fact that softmax is invariant to a uniform translation + # of the logits, which makes the math work out *after* applying causal masking. If no causal masking + # will be applied, it is necessary to construct the diagonal mask. + n_heads = self.num_attention_heads + + def _get_alibi_head_slopes(n_heads: int) -> List[float]: + def get_slopes_power_of_2(n): + start = 2 ** (-(2 ** -(math.log2(n) - 3))) + ratio = start + return [start * ratio**i for i in range(n)] + + if math.log2(n_heads).is_integer(): + return get_slopes_power_of_2( + n_heads + ) # In the paper, we only train models that have 2^a heads for some a. This function has + else: # some good properties that only occur when the input is a power of 2. 
To maintain that even + closest_power_of_2 = 2 ** math.floor( + math.log2(n_heads) + ) # when the number of heads is not a power of 2, we use this workaround. + return ( + get_slopes_power_of_2(closest_power_of_2) + + _get_alibi_head_slopes(2 * closest_power_of_2)[0::2][ + : n_heads - closest_power_of_2 + ] + ) + + context_position = torch.arange(size, device=device)[:, None] + memory_position = torch.arange(size, device=device)[None, :] + relative_position = torch.abs(memory_position - context_position) + # [n_heads, max_token_length, max_token_length] + relative_position = relative_position.unsqueeze(0).expand(n_heads, -1, -1) + slopes = torch.Tensor(_get_alibi_head_slopes(n_heads)).to(device) * -1 + alibi = slopes.unsqueeze(1).unsqueeze(1) * relative_position + # [1, n_heads, max_token_length, max_token_length] + alibi = alibi.unsqueeze(0) + assert alibi.shape == torch.Size([1, n_heads, size, size]) + + self._current_alibi_size = size + return alibi + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = ( + () if output_attentions and self.config.add_cross_attention else None + ) + + # Add alibi matrix to extended_attention_mask + _, seqlen, _ = hidden_states.size() + if self._current_alibi_size < seqlen: + # Rebuild the alibi tensor when needed + warnings.warn( + f'Increasing alibi size from {self._current_alibi_size} to {seqlen}.' + ) + self.register_buffer( + "alibi", + self.rebuild_alibi_tensor(size=seqlen, device=hidden_states.device).to( + hidden_states.dtype + ), + persistent=False, + ) + elif self.alibi.device != hidden_states.device: + # Device catch-up + self.alibi = self.alibi.to(hidden_states.device) + + alibi_bias = self.alibi[:, :, :seqlen, :seqlen] + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
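
Review note: a hedged, standalone recomputation of the bias that `rebuild_alibi_tensor` earlier in this hunk assembles. For a power-of-two head count, head i gets slope 2**(-8 * (i + 1) / n_heads), and position pair (q, k) gets bias -slope * |q - k| (toy sizes below):

    import math
    import torch

    def alibi_bias(n_heads: int, size: int) -> torch.Tensor:
        start = 2 ** (-(2 ** -(math.log2(n_heads) - 3)))
        slopes = torch.tensor([start ** (i + 1) for i in range(n_heads)])
        pos = torch.arange(size)
        distance = (pos[None, :] - pos[:, None]).abs()  # (size, size)
        # (n_heads, size, size); the model adds a leading batch dim of 1
        return -slopes[:, None, None] * distance[None, :, :]

    bias = alibi_bias(8, 4)
    assert bias.shape == (8, 4, 4)
    assert bias[0, 0, 1].item() == -0.5  # head 0 slope is 2**-1 when n_heads == 8
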
+ ) + use_cache = False + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + alibi_bias, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + alibi_bias, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class JinaBertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class JinaBertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class JinaBertLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = JinaBertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
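
Review note: the comment above describes BERT's usual weight tying: the MLM decoder reuses the input embedding matrix, and only the output bias is a free parameter; re-pointing `decoder.bias` at `self.bias` keeps the two in sync when `resize_token_embeddings` grows the vocabulary. A minimal illustration with hypothetical sizes (in the model itself the tying is applied via `post_init()`):

    import torch
    from torch import nn

    vocab, hidden = 100, 16
    embeddings = nn.Embedding(vocab, hidden)        # weight: (vocab, hidden)
    decoder = nn.Linear(hidden, vocab, bias=False)  # weight: also (vocab, hidden)

    decoder.weight = embeddings.weight              # shared storage, not a copy
    assert decoder.weight.data_ptr() == embeddings.weight.data_ptr()
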
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class JinaBertOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = JinaBertLMPredictionHead(config) + + def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class JinaBertOnlyNSPHead(nn.Module): + def __init__(self, config): + super().__init__() + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, pooled_output): + seq_relationship_score = self.seq_relationship(pooled_output) + return seq_relationship_score + + +class JinaBertPreTrainingHeads(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = JinaBertLMPredictionHead(config) + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + +class JinaBertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = JinaBertConfig + load_tf_weights = load_tf_weights_in_bert + base_model_prefix = "bert" + supports_gradient_checkpointing = True + _no_split_modules = ["JinaBertLayer"] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, JinaBertEncoder): + module.gradient_checkpointing = value + + +@dataclass +class JinaBertForPreTrainingOutput(ModelOutput): + """ + Output type of [`BertForPreTraining`]. + + Args: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + Total loss as the sum of the masked language modeling loss and the next sequence prediction + (classification) loss. + prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation + before SoftMax). 
+        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+            shape `(batch_size, sequence_length, hidden_size)`.
+
+            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+            heads.
+    """
+
+    loss: Optional[torch.FloatTensor] = None
+    prediction_logits: torch.FloatTensor = None
+    seq_relationship_logits: torch.FloatTensor = None
+    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+BERT_START_DOCSTRING = r"""
+
+    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+    etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+    and behavior.
+
+    Parameters:
+        config ([`BertConfig`]): Model configuration class with all the parameters of the model.
+            Initializing with a config file does not load the weights associated with the model, only the
+            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BERT_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `({0})`):
+            Indices of input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+            1]`:
+
+            - 0 corresponds to a *sentence A* token,
+            - 1 corresponds to a *sentence B* token.
+
+            [What are token type IDs?](../glossary#token-type-ids)
+        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
+            config.max_position_embeddings - 1]`.
+
+            [What are position IDs?](../glossary#position-ids)
+        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+            model's internal embedding lookup matrix.
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attention tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
+    BERT_START_DOCSTRING,
+)
+class JinaBertModel(JinaBertPreTrainedModel):
+    """
+
+    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument
+    and `add_cross_attention` set to `True`; an `encoder_hidden_states` input is then expected in the forward pass.
+    """
+
+    def __init__(self, config: JinaBertConfig, add_pooling_layer=True):
+        super().__init__(config)
+        self.config = config
+
+        self.emb_pooler = config.emb_pooler
+        self._name_or_path = config._name_or_path
+        if self.emb_pooler:
+            from transformers import AutoTokenizer
+
+            self.tokenizer = AutoTokenizer.from_pretrained(config._name_or_path)
+
+        self.embeddings = JinaBertEmbeddings(config)
+        self.encoder = JinaBertEncoder(config)
+
+        self.pooler = JinaBertPooler(config) if add_pooling_layer else None
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @torch.inference_mode()
+    def encode(
+        self: 'JinaBertModel',
+        sentences: Union[str, List[str]],
+        batch_size: int = 32,
+        show_progress_bar: Optional[bool] = None,
+        output_value: str = 'sentence_embedding',
+        convert_to_numpy: bool = True,
+        convert_to_tensor: bool = False,
+        device: Optional[torch.device] = None,
+        normalize_embeddings: bool = False,
+        **tokenizer_kwargs,
+    ) -> Union[List[torch.Tensor], np.ndarray, torch.Tensor]:
+        """
+        Computes sentence embeddings
+
+        Args:
+            sentences(`str` or `List[str]`):
+                Sentence or sentences to be encoded
+            batch_size(`int`, *optional*, defaults to 32):
+                Batch size for the computation
+            show_progress_bar(`bool`, *optional*, defaults to None):
+                Show a progress bar when encoding sentences.
+                If set to None, progress bar is only shown when `logger.level == logging.INFO` or `logger.level == logging.DEBUG`.
+            output_value(`str`, *optional*, defaults to 'sentence_embedding'):
+                Default sentence_embedding, to get sentence embeddings.
+                Can be set to token_embeddings to get wordpiece token embeddings.
+ Set to None, to get all output values + convert_to_numpy(`bool`, *optional*, defaults to True): + If true, the output is a list of numpy vectors. + Else, it is a list of pytorch tensors. + convert_to_tensor(`bool`, *optional*, defaults to False): + If true, you get one large tensor as return. + Overwrites any setting from convert_to_numpy + device(`torch.device`, *optional*, defaults to None): + Which torch.device to use for the computation + normalize_embeddings(`bool`, *optional*, defaults to False): + If set to true, returned vectors will have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used. + tokenizer_kwargs(`Dict[str, Any]`, *optional*, defaults to {}): + Keyword arguments for the tokenizer + + Returns: + By default, a list of tensors is returned. + If convert_to_tensor, a stacked tensor is returned. + If convert_to_numpy, a numpy matrix is returned. + """ + if not self.emb_pooler: + warnings.warn("No emb_pooler specified, defaulting to mean pooling.") + self.emb_pooler = 'mean' + from transformers import AutoTokenizer + + self.tokenizer = AutoTokenizer.from_pretrained(self._name_or_path) + is_training = self.training + self.eval() + + if show_progress_bar is None: + show_progress_bar = ( + logger.getEffectiveLevel() == logging.INFO + or logger.getEffectiveLevel() == logging.DEBUG + ) + + if convert_to_tensor: + convert_to_numpy = False + + if output_value != 'sentence_embedding': + convert_to_tensor = False + convert_to_numpy = False + + input_was_string = False + if isinstance(sentences, str) or not hasattr(sentences, '__len__'): + sentences = [sentences] + input_was_string = True + + if device is not None: + self.to(device) + + # TODO: Maybe use better length heuristic? + permutation = np.argsort([-len(i) for i in sentences]) + inverse_permutation = np.argsort(permutation) + sentences = [sentences[idx] for idx in permutation] + + tokenizer_kwargs['padding'] = tokenizer_kwargs.get('padding', True) + tokenizer_kwargs['max_length'] = tokenizer_kwargs.get('max_length', 8192) + tokenizer_kwargs['truncation'] = tokenizer_kwargs.get('truncation', True) + + all_embeddings = [] + + if has_tqdm: + range_iter = trange( + 0, + len(sentences), + batch_size, + desc="Encoding", + disable=not show_progress_bar, + ) + else: + range_iter = range(0, len(sentences), batch_size) + + for i in range_iter: + encoded_input = self.tokenizer( + sentences[i : i + batch_size], + return_tensors='pt', + **tokenizer_kwargs, + ).to(self.device) + token_embs = self.forward(**encoded_input)[0] + + # Accumulate in fp32 to avoid overflow + token_embs = token_embs.float() + + if output_value == 'token_embeddings': + raise NotImplementedError + elif output_value is None: + raise NotImplementedError + else: + embeddings = self.mean_pooling( + token_embs, encoded_input['attention_mask'] + ) + + if normalize_embeddings: + embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) + + if convert_to_numpy: + embeddings = embeddings.cpu() + all_embeddings.extend(embeddings) + + all_embeddings = [all_embeddings[idx] for idx in inverse_permutation] + + if convert_to_tensor: + all_embeddings = torch.stack(all_embeddings) + elif convert_to_numpy: + all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings]) + + if input_was_string: + all_embeddings = all_embeddings[0] + + self.train(is_training) + return all_embeddings + + def mean_pooling( + self, token_embeddings: torch.Tensor, attention_mask: torch.Tensor + ): + input_mask_expanded = ( + 
attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() + ) + return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp( + input_mask_expanded.sum(1), min=1e-9 + ) + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
+ """ + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time" + ) + elif input_ids is not None: + # self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = ( + past_key_values[0][0].shape[2] if past_key_values is not None else 0 + ) + + if attention_mask is None: + attention_mask = torch.ones( + ((batch_size, seq_length + past_key_values_length)), device=device + ) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand( + batch_size, seq_length + ) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros( + input_shape, dtype=torch.long, device=device + ) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( + attention_mask, input_shape + ) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + ( + encoder_batch_size, + encoder_sequence_length, + _, + ) = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask + ) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = ( + self.pooler(sequence_output) if self.pooler is not None else None + ) + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """ + Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next + sentence prediction (classification)` head. 
+ """, + BERT_START_DOCSTRING, +) +class JinaBertForPreTraining(JinaBertPreTrainedModel): + _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"] + + def __init__(self, config): + super().__init__(config) + + self.bert = JinaBertModel(config) + self.cls = JinaBertPreTrainingHeads(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @replace_return_docstrings( + output_type=JinaBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + next_sentence_label: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], JinaBertForPreTrainingOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), + the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence + pair (see `input_ids` docstring) Indices should be in `[0, 1]`: + + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + kwargs (`Dict[str, any]`, optional, defaults to *{}*): + Used to hide legacy arguments that have been deprecated. 
+ + Returns: + """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output, pooled_output = outputs[:2] + prediction_scores, seq_relationship_score = self.cls( + sequence_output, pooled_output + ) + + total_loss = None + if labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct( + prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) + ) + next_sentence_loss = loss_fct( + seq_relationship_score.view(-1, 2), next_sentence_label.view(-1) + ) + total_loss = masked_lm_loss + next_sentence_loss + + if not return_dict: + output = (prediction_scores, seq_relationship_score) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return JinaBertForPreTrainingOutput( + loss=total_loss, + prediction_logits=prediction_scores, + seq_relationship_logits=seq_relationship_score, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """JinaBert Model with a `language modeling` head on top for CLM fine-tuning.""", + BERT_START_DOCSTRING, +) +class JinaBertLMHeadModel(JinaBertPreTrainedModel): + _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"] + + def __init__(self, config): + super().__init__(config) + + if not config.is_decoder: + logger.warning( + "If you want to use `JinaBertLMHeadModel` as a standalone, add `is_decoder=True.`" + ) + + self.bert = JinaBertModel(config, add_pooling_layer=False) + self.cls = JinaBertOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=CausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.Tensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. 
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct( + shifted_prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1), + ) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + use_cache=True, + **model_kwargs, + ): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past_key_values is used + if past_key_values is not None: + input_ids = input_ids[:, -1:] + + return { + "input_ids": input_ids, + "attention_mask": 
attention_mask, + "past_key_values": past_key_values, + "use_cache": use_cache, + } + + def _reorder_cache(self, past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple( + past_state.index_select(0, beam_idx) for past_state in layer_past + ), + ) + return reordered_past + + +@add_start_docstrings( + """JinaBert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING +) +class JinaBertForMaskedLM(JinaBertPreTrainedModel): + _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"] + + def __init__(self, config): + super().__init__(config) + + if config.is_decoder: + logger.warning( + "If you want to use `JinaBertForMaskedLM`, make sure `config.is_decoder=False` for " + "bi-directional self-attention." + ) + + self.bert = JinaBertModel(config, add_pooling_layer=False) + self.cls = JinaBertOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="'paris'", + expected_loss=0.88, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss.
Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + """ + + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct( + prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) + ) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ( + ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + ) + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, attention_mask=None, **model_kwargs + ): + input_shape = input_ids.shape + effective_batch_size = input_shape[0] + + # add a dummy token + if self.config.pad_token_id is None: + raise ValueError("The PAD token should be defined for generation") + + attention_mask = torch.cat( + [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], + dim=-1, + ) + dummy_token = torch.full( + (effective_batch_size, 1), + self.config.pad_token_id, + dtype=torch.long, + device=input_ids.device, + ) + input_ids = torch.cat([input_ids, dummy_token], dim=1) + + return {"input_ids": input_ids, "attention_mask": attention_mask} + + +@add_start_docstrings( + """JinaBert Model with a `next sentence prediction (classification)` head on top.""", + BERT_START_DOCSTRING, +) +class JinaBertForNextSentencePrediction(JinaBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.bert = JinaBertModel(config) + self.cls = JinaBertOnlyNSPHead(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @replace_return_docstrings( + output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair + (see `input_ids` docstring).
Indices should be in `[0, 1]`: + + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + + Returns: + """ + + if "next_sentence_label" in kwargs: + warnings.warn( + "The `next_sentence_label` argument is deprecated and will be removed in a future version, use" + " `labels` instead.", + FutureWarning, + ) + labels = kwargs.pop("next_sentence_label") + + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + seq_relationship_scores = self.cls(pooled_output) + + next_sentence_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + next_sentence_loss = loss_fct( + seq_relationship_scores.view(-1, 2), labels.view(-1) + ) + + if not return_dict: + output = (seq_relationship_scores,) + outputs[2:] + return ( + ((next_sentence_loss,) + output) + if next_sentence_loss is not None + else output + ) + + return NextSentencePredictorOutput( + loss=next_sentence_loss, + logits=seq_relationship_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + JinaBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled + output) e.g. for GLUE tasks. + """, + BERT_START_DOCSTRING, +) +class JinaBertForSequenceClassification(JinaBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + + self.bert = JinaBertModel(config) + classifier_dropout = ( + config.classifier_dropout + if config.classifier_dropout is not None + else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, + expected_loss=_SEQ_CLASS_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
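+
+        Example (editor's sketch — this mirrors the code sample that `add_code_sample_docstrings`
+        generates from `_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION`; the checkpoint is a plain BERT
+        fine-tune and is shown purely to illustrate the call pattern):
+
+        ```python
+        >>> import torch
+        >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("textattack/bert-base-uncased-yelp-polarity")
+        >>> model = AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-yelp-polarity")
+
+        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+        >>> with torch.no_grad():
+        ...     logits = model(**inputs).logits
+
+        >>> predicted_class_id = logits.argmax().item()
+        >>> model.config.id2label[predicted_class_id]
+        'LABEL_1'
+        ```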
+ """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and ( + labels.dtype == torch.long or labels.dtype == torch.int + ): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + JinaBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. + """, + BERT_START_DOCSTRING, +) +class JinaBertForMultipleChoice(JinaBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.bert = JinaBertModel(config) + classifier_dropout = ( + config.classifier_dropout + if config.classifier_dropout is not None + else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, 1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., + num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See + `input_ids` above) + """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + num_choices = ( + input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + ) + + input_ids = ( + input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + ) + attention_mask = ( + attention_mask.view(-1, attention_mask.size(-1)) + if attention_mask is not None + else None + ) + token_type_ids = ( + token_type_ids.view(-1, token_type_ids.size(-1)) + if token_type_ids is not None + else None + ) + position_ids = ( + position_ids.view(-1, position_ids.size(-1)) + if position_ids is not None + else None + ) + inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + JinaBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. + """, + BERT_START_DOCSTRING, +) +class JinaBertForTokenClassification(JinaBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = JinaBertModel(config, add_pooling_layer=False) + classifier_dropout = ( + config.classifier_dropout + if config.classifier_dropout is not None + else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, + expected_loss=_TOKEN_CLASS_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
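+
+        Example (editor's sketch — mirrors the sample that `add_code_sample_docstrings` generates
+        from `_CHECKPOINT_FOR_TOKEN_CLASSIFICATION`; the input sentence is the stock example used
+        with that checkpoint and is illustrative only):
+
+        ```python
+        >>> import torch
+        >>> from transformers import AutoTokenizer, AutoModelForTokenClassification
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
+        >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
+
+        >>> inputs = tokenizer(
+        ...     "HuggingFace is a company based in Paris and New York",
+        ...     add_special_tokens=False,
+        ...     return_tensors="pt",
+        ... )
+        >>> with torch.no_grad():
+        ...     logits = model(**inputs).logits
+
+        >>> predicted_token_class_ids = logits.argmax(-1)
+        >>> [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]]
+        ['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC']
+        ```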
+ """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + JinaBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + BERT_START_DOCSTRING, +) +class JinaBertForQuestionAnswering(JinaBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = JinaBertModel(config, add_pooling_layer=False) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_QA, + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + qa_target_start_index=_QA_TARGET_START_INDEX, + qa_target_end_index=_QA_TARGET_END_INDEX, + expected_output=_QA_EXPECTED_OUTPUT, + expected_loss=_QA_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + start_positions: Optional[torch.Tensor] = None, + end_positions: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + diff --git a/models/models--jinaai--jina-bert-implementation/blobs/823d01be739976929f1de49c19ba4eec80e604cf b/models/models--jinaai--jina-bert-implementation/blobs/823d01be739976929f1de49c19ba4eec80e604cf new file mode 100644 index 0000000000000000000000000000000000000000..823d01be739976929f1de49c19ba4eec80e604cf --- /dev/null +++ b/models/models--jinaai--jina-bert-implementation/blobs/823d01be739976929f1de49c19ba4eec80e604cf @@ -0,0 +1,168 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023 Jina AI GmbH. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" BERT model configuration""" +from collections import OrderedDict +from typing import Mapping + +from transformers.configuration_utils import PretrainedConfig +from transformers.onnx import OnnxConfig +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + + +class JinaBertConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`JinaBertModel`]. It is used to + instantiate a BERT model according to the specified arguments, defining the model architecture. 
Instantiating a + configuration with the defaults will yield a similar configuration to that of the BERT + [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`BertModel`] or [`TFBertModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + is_decoder (`bool`, *optional*, defaults to `False`): + Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. + feed_forward_type (`str`, *optional*, defaults to `"original"`): + The type of feed forward layer to use in the bert layers. + Can be one of GLU variants, e.g. 
`"reglu"`, `"geglu"` + emb_pooler (`str`, *optional*, defaults to `None`): + The function to use for pooling the last layer embeddings to get the sentence embeddings. + Should be one of `None`, `"mean"`. + attn_implementation (`str`, *optional*, defaults to `"torch"`): + The implementation of the self-attention layer. Can be one of: + - `None` for the original implementation, + - `torch` for the PyTorch SDPA implementation, + + Examples: + + ```python + >>> from transformers import JinaBertConfig, JinaBertModel + + >>> # Initializing a JinaBert configuration + >>> configuration = JinaBertConfig() + + >>> # Initializing a model (with random weights) from the configuration + >>> model = JinaBertModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + + >>> # Encode text inputs + >>> embeddings = model.encode(text_inputs) + ```""" + model_type = "bert" + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + pad_token_id=0, + position_embedding_type="absolute", + use_cache=True, + classifier_dropout=None, + feed_forward_type="original", + emb_pooler=None, + attn_implementation='torch', + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout + self.feed_forward_type = feed_forward_type + self.emb_pooler = emb_pooler + self.attn_implementation = attn_implementation + +class JinaBertOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task == "multiple-choice": + dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} + else: + dynamic_axis = {0: "batch", 1: "sequence"} + return OrderedDict( + [ + ("input_ids", dynamic_axis), + ("attention_mask", dynamic_axis), + ("token_type_ids", dynamic_axis), + ] + ) diff --git a/models/models--jinaai--jina-bert-implementation/refs/main b/models/models--jinaai--jina-bert-implementation/refs/main new file mode 100644 index 0000000000000000000000000000000000000000..345f3e6f012a8b045242364223eece1c535f4112 --- /dev/null +++ b/models/models--jinaai--jina-bert-implementation/refs/main @@ -0,0 +1 @@ +f3ec4cf7de7e561007f27c9efc7148b0bd713f81 \ No newline at end of file diff --git a/models/models--jinaai--jina-bert-implementation/snapshots/f3ec4cf7de7e561007f27c9efc7148b0bd713f81/configuration_bert.py b/models/models--jinaai--jina-bert-implementation/snapshots/f3ec4cf7de7e561007f27c9efc7148b0bd713f81/configuration_bert.py new file mode 100644 index 0000000000000000000000000000000000000000..823d01be739976929f1de49c19ba4eec80e604cf --- /dev/null +++ 
b/models/models--jinaai--jina-bert-implementation/snapshots/f3ec4cf7de7e561007f27c9efc7148b0bd713f81/configuration_bert.py @@ -0,0 +1,168 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023 Jina AI GmbH. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" BERT model configuration""" +from collections import OrderedDict +from typing import Mapping + +from transformers.configuration_utils import PretrainedConfig +from transformers.onnx import OnnxConfig +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + + +class JinaBertConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`JinaBertModel`]. It is used to + instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the BERT + [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`BertModel`] or [`TFBertModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`]. 
+ initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + is_decoder (`bool`, *optional*, defaults to `False`): + Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. + feed_forward_type (`str`, *optional*, defaults to `"original"`): + The type of feed forward layer to use in the bert layers. + Can be one of GLU variants, e.g. `"reglu"`, `"geglu"` + emb_pooler (`str`, *optional*, defaults to `None`): + The function to use for pooling the last layer embeddings to get the sentence embeddings. + Should be one of `None`, `"mean"`. + attn_implementation (`str`, *optional*, defaults to `"torch"`): + The implementation of the self-attention layer. 
Can be one of: + - `None` for the original implementation, + - `torch` for the PyTorch SDPA implementation, + + Examples: + + ```python + >>> from transformers import JinaBertConfig, JinaBertModel + + >>> # Initializing a JinaBert configuration + >>> configuration = JinaBertConfig() + + >>> # Initializing a model (with random weights) from the configuration + >>> model = JinaBertModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + + >>> # Encode text inputs + >>> embeddings = model.encode(text_inputs) + ```""" + model_type = "bert" + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + pad_token_id=0, + position_embedding_type="absolute", + use_cache=True, + classifier_dropout=None, + feed_forward_type="original", + emb_pooler=None, + attn_implementation='torch', + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout + self.feed_forward_type = feed_forward_type + self.emb_pooler = emb_pooler + self.attn_implementation = attn_implementation + +class JinaBertOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task == "multiple-choice": + dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} + else: + dynamic_axis = {0: "batch", 1: "sequence"} + return OrderedDict( + [ + ("input_ids", dynamic_axis), + ("attention_mask", dynamic_axis), + ("token_type_ids", dynamic_axis), + ] + ) diff --git a/models/models--jinaai--jina-bert-implementation/snapshots/f3ec4cf7de7e561007f27c9efc7148b0bd713f81/modeling_bert.py b/models/models--jinaai--jina-bert-implementation/snapshots/f3ec4cf7de7e561007f27c9efc7148b0bd713f81/modeling_bert.py new file mode 100644 index 0000000000000000000000000000000000000000..64b6ce6fe4477c320b0ab303e2f26ae98beae1f7 --- /dev/null +++ b/models/models--jinaai--jina-bert-implementation/snapshots/f3ec4cf7de7e561007f27c9efc7148b0bd713f81/modeling_bert.py @@ -0,0 +1,2357 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023 Jina AI GmbH. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch BERT model.""" + + +import math +import os +import warnings +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union +import numpy as np + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_bert import JinaBertConfig + +# Torch implementation +try: + from torch.nn.functional import scaled_dot_product_attention +except ImportError: + scaled_dot_product_attention = None + +# This is used by encode but user may not have it installed +try: + from tqdm.autonotebook import trange + + has_tqdm = True +except ImportError: + has_tqdm = False + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "bert-base-uncased" +_CONFIG_FOR_DOC = "JinaBertConfig" + +# TokenClassification docstring +_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = ( + "dbmdz/bert-large-cased-finetuned-conll03-english" +) +_TOKEN_CLASS_EXPECTED_OUTPUT = "['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC'] " +_TOKEN_CLASS_EXPECTED_LOSS = 0.01 + +# QuestionAnswering docstring +_CHECKPOINT_FOR_QA = "deepset/bert-base-cased-squad2" +_QA_EXPECTED_OUTPUT = "'a nice puppet'" +_QA_EXPECTED_LOSS = 7.41 +_QA_TARGET_START_INDEX = 14 +_QA_TARGET_END_INDEX = 15 + +# SequenceClassification docstring +_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "textattack/bert-base-uncased-yelp-polarity" +_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'" +_SEQ_CLASS_EXPECTED_LOSS = 0.01 + + +def load_tf_weights_in_bert(model, config, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions."
+ ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info(f"Converting TensorFlow checkpoint from {tf_path}") + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info(f"Loading TF weight {name} with shape {shape}") + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split("/") + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v, + # which are not required for using the pretrained model + if any( + n + in [ + "adam_v", + "adam_m", + "AdamWeightDecayOptimizer", + "AdamWeightDecayOptimizer_1", + "global_step", + ] + for n in name + ): + logger.info(f"Skipping {'/'.join(name)}") + continue + pointer = model + for m_name in name: + if re.fullmatch(r"[A-Za-z]+_\d+", m_name): + scope_names = re.split(r"_(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] == "kernel" or scope_names[0] == "gamma": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "output_bias" or scope_names[0] == "beta": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "output_weights": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "squad": + pointer = getattr(pointer, "classifier") + else: + try: + pointer = getattr(pointer, scope_names[0]) + except AttributeError: + logger.info(f"Skipping {'/'.join(name)}") + continue + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + if m_name[-11:] == "_embeddings": + pointer = getattr(pointer, "weight") + elif m_name == "kernel": + array = np.transpose(array) + try: + if pointer.shape != array.shape: + raise ValueError( + f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" + ) + except ValueError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info(f"Initialize PyTorch weight {name}") + pointer.data = torch.from_numpy(array) + return model + + +class JinaBertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config: JinaBertConfig): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id + ) + if config.position_embedding_type != "alibi": + self.position_embeddings = nn.Embedding( + config.max_position_embeddings, config.hidden_size + ) + self.token_type_embeddings = nn.Embedding( + config.type_vocab_size, config.hidden_size + ) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr( + config, "position_embedding_type", "absolute" + ) + self.register_buffer( + "position_ids", + torch.arange(config.max_position_embeddings).expand((1, -1)), + persistent=False, + ) + self.register_buffer( + "token_type_ids", + torch.zeros(self.position_ids.size(), dtype=torch.long), + persistent=False, + ) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, +
past_key_values_length: int = 0, + ) -> torch.Tensor: + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[ + :, past_key_values_length : seq_length + past_key_values_length + ] + + # Setting the token_type_ids to the registered buffer from the constructor, where it is all zeros, usually occurs + # when it's auto-generated; the registered buffer helps users trace the model without passing token_type_ids, and + # solves issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand( + input_shape[0], seq_length + ) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros( + input_shape, dtype=torch.long, device=self.position_ids.device + ) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class JinaBertSelfAttention(nn.Module): + def __init__(self, config: JinaBertConfig, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr( + config, "embedding_size" + ): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.attn_implementation = config.attn_implementation + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout_p = config.attention_probs_dropout_prob + self.dropout = nn.Dropout(self.dropout_p) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if ( + self.position_embedding_type == "relative_key" + or self.position_embedding_type == "relative_key_query" + ): + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding( + 2 * config.max_position_embeddings - 1, self.attention_head_size + ) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + ( + self.num_attention_heads, + self.attention_head_size, + ) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + bias: Optional[torch.FloatTensor] = None, + ) -> Tuple[torch.Tensor]: +
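# Editor's note (added commentary, not in the original file): `hidden_states` is
+ # (batch, seq_len, hidden_size); Q/K/V are projected to `all_head_size` and
+ # reshaped by `transpose_for_scores` to (batch, num_heads, seq_len, head_size).
+ # `bias` is the precomputed ALiBi tensor sliced to the current sequence length;
+ # it is added to the attention logits below, or passed as the SDPA attn_mask on
+ # the fast path. +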
mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + if self.attn_implementation == 'torch' and scaled_dot_product_attention is not None: + b, _, s, _ = query_layer.shape + new_bias = attention_mask + bias + dropout_p = self.dropout_p if self.training else 0.0 + attn = scaled_dot_product_attention(query_layer, key_layer, value_layer, new_bias, dropout_p=dropout_p) + attn = attn.permute(0, 2, 1, 3).contiguous() + return (attn.view(b, s, self.all_head_size),) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
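+ # Editor's note: (batch, heads, q_len, head_size) @ (batch, heads, head_size, k_len)
+ # -> (batch, heads, q_len, k_len). This manual path only runs when the SDPA branch
+ # above was not taken (attn_implementation != 'torch', or scaled_dot_product_attention
+ # is unavailable in the installed torch).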
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if ( + self.position_embedding_type == "relative_key" + or self.position_embedding_type == "relative_key_query" + ): + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor( + key_length - 1, dtype=torch.long, device=hidden_states.device + ).view(-1, 1) + else: + position_ids_l = torch.arange( + query_length, dtype=torch.long, device=hidden_states.device + ).view(-1, 1) + position_ids_r = torch.arange( + key_length, dtype=torch.long, device=hidden_states.device + ).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding( + distance + self.max_position_embeddings - 1 + ) + positional_embedding = positional_embedding.to( + dtype=query_layer.dtype + ) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum( + "bhld,lrd->bhlr", query_layer, positional_embedding + ) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum( + "bhld,lrd->bhlr", query_layer, positional_embedding + ) + relative_position_scores_key = torch.einsum( + "bhrd,lrd->bhlr", key_layer, positional_embedding + ) + attention_scores = ( + attention_scores + + relative_position_scores_query + + relative_position_scores_key + ) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask (precomputed for all layers in the JinaBertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores + bias, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper.
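+ # Editor's note: unlike reference BERT, the ALiBi `bias` was added inside the
+ # softmax above, so `attention_probs` already reflects the linear positional
+ # penalty before dropout is applied.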
+ attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = ( + (context_layer, attention_probs) if output_attentions else (context_layer,) + ) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +class JinaBertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward( + self, hidden_states: torch.Tensor, input_tensor: torch.Tensor + ) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class JinaBertAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = JinaBertSelfAttention( + config, position_embedding_type=position_embedding_type + ) + self.output = JinaBertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, + self.self.num_attention_heads, + self.self.attention_head_size, + self.pruned_heads, + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = ( + self.self.attention_head_size * self.self.num_attention_heads + ) + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + bias: Optional[torch.FloatTensor] = None, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + bias, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[ + 1: + ] # add attentions if we output them + return outputs + + +class JinaBertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + 
return hidden_states + + +class JinaBertOutput(nn.Module): + def __init__(self, config: JinaBertConfig): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward( + self, hidden_states: torch.Tensor, input_tensor: torch.Tensor + ) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class JinaBertGLUMLP(nn.Module): + def __init__(self, config: JinaBertConfig): + super().__init__() + self.config = config + self.gated_layers = nn.Linear( + config.hidden_size, config.intermediate_size * 2, bias=False + ) + if config.feed_forward_type == 'reglu': + self.act = nn.ReLU() + elif config.feed_forward_type == 'geglu': + self.act = nn.GELU() + else: + raise ValueError( + f"feed_forward_type {config.feed_forward_type} not supported" + ) + self.wo = nn.Linear(config.intermediate_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + residual_connection = hidden_states + # compute the activation + hidden_states = self.gated_layers(hidden_states) + gated = hidden_states[:, :, : self.config.intermediate_size] + non_gated = hidden_states[:, :, self.config.intermediate_size :] + hidden_states = self.act(gated) * non_gated + hidden_states = self.dropout(hidden_states) + # multiply by the second matrix + hidden_states = self.wo(hidden_states) + # add the residual connection and post-LN + hidden_states = self.layernorm(hidden_states + residual_connection) + return hidden_states + + +class JinaBertLayer(nn.Module): + def __init__(self, config: JinaBertConfig): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = JinaBertAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + self.feed_forward_type = config.feed_forward_type + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError( + f"{self} should be used as a decoder model if cross attention is added" + ) + self.crossattention = JinaBertAttention( + config, position_embedding_type="absolute" + ) + if self.feed_forward_type.endswith('glu'): + self.mlp = JinaBertGLUMLP(config) + else: + self.intermediate = JinaBertIntermediate(config) + self.output = JinaBertOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + bias: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = ( + past_key_value[:2] if past_key_value is not None else None + ) + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + bias=bias, + ) + attention_output 
= self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[ + 1: + ] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = ( + past_key_value[-2:] if past_key_value is not None else None + ) + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = ( + outputs + cross_attention_outputs[1:-1] + ) # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + if self.feed_forward_type.endswith('glu'): + layer_output = self.mlp(attention_output) + else: + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output, + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class JinaBertEncoder(nn.Module): + def __init__(self, config: JinaBertConfig): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [JinaBertLayer(config) for _ in range(config.num_hidden_layers)] + ) + self.gradient_checkpointing = False + self.num_attention_heads = config.num_attention_heads + self.register_buffer( + "alibi", + self.rebuild_alibi_tensor(size=config.max_position_embeddings), + persistent=False, + ) + + def rebuild_alibi_tensor( + self, size: int, device: Optional[Union[torch.device, str]] = None + ): + # Alibi + # Following https://github.com/ofirpress/attention_with_linear_biases/issues/5 (Implementation 1) + # In the causal case, you can exploit the fact that softmax is invariant to a uniform translation + # of the logits, which makes the math work out *after* applying causal masking. If no causal masking + # will be applied, it is necessary to construct the diagonal mask. + n_heads = self.num_attention_heads + + def _get_alibi_head_slopes(n_heads: int) -> List[float]: + def get_slopes_power_of_2(n): + start = 2 ** (-(2 ** -(math.log2(n) - 3))) + ratio = start + return [start * ratio**i for i in range(n)] + + if math.log2(n_heads).is_integer(): + return get_slopes_power_of_2( + n_heads + ) # In the paper, we only train models that have 2^a heads for some a. This function has + else: # some good properties that only occur when the input is a power of 2. 
To maintain that even + closest_power_of_2 = 2 ** math.floor( + math.log2(n_heads) + ) # when the number of heads is not a power of 2, we use this workaround. + return ( + get_slopes_power_of_2(closest_power_of_2) + + _get_alibi_head_slopes(2 * closest_power_of_2)[0::2][ + : n_heads - closest_power_of_2 + ] + ) + + context_position = torch.arange(size, device=device)[:, None] + memory_position = torch.arange(size, device=device)[None, :] + relative_position = torch.abs(memory_position - context_position) + # [n_heads, max_token_length, max_token_length] + relative_position = relative_position.unsqueeze(0).expand(n_heads, -1, -1) + slopes = torch.Tensor(_get_alibi_head_slopes(n_heads)).to(device) * -1 + alibi = slopes.unsqueeze(1).unsqueeze(1) * relative_position + # [1, n_heads, max_token_length, max_token_length] + alibi = alibi.unsqueeze(0) + assert alibi.shape == torch.Size([1, n_heads, size, size]) + + self._current_alibi_size = size + return alibi + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = ( + () if output_attentions and self.config.add_cross_attention else None + ) + + # Add alibi matrix to extended_attention_mask + _, seqlen, _ = hidden_states.size() + if self._current_alibi_size < seqlen: + # Rebuild the alibi tensor when needed + warnings.warn( + f'Increasing alibi size from {self._current_alibi_size} to {seqlen}.' + ) + self.register_buffer( + "alibi", + self.rebuild_alibi_tensor(size=seqlen, device=hidden_states.device).to( + hidden_states.dtype + ), + persistent=False, + ) + elif self.alibi.device != hidden_states.device: + # Device catch-up + self.alibi = self.alibi.to(hidden_states.device) + + alibi_bias = self.alibi[:, :, :seqlen, :seqlen] + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + alibi_bias, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + alibi_bias, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class JinaBertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class JinaBertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class JinaBertLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = JinaBertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
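+        # A note on the tying: `decoder.weight` is shared with the input word
+        # embedding matrix via `get_output_embeddings`/`set_output_embeddings`
+        # on the task models (see `_tied_weights_keys` below), so only the
+        # per-token `bias` defined here adds new parameters.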
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class JinaBertOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = JinaBertLMPredictionHead(config) + + def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class JinaBertOnlyNSPHead(nn.Module): + def __init__(self, config): + super().__init__() + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, pooled_output): + seq_relationship_score = self.seq_relationship(pooled_output) + return seq_relationship_score + + +class JinaBertPreTrainingHeads(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = JinaBertLMPredictionHead(config) + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + +class JinaBertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = JinaBertConfig + load_tf_weights = load_tf_weights_in_bert + base_model_prefix = "bert" + supports_gradient_checkpointing = True + _no_split_modules = ["JinaBertLayer"] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, JinaBertEncoder): + module.gradient_checkpointing = value + + +@dataclass +class JinaBertForPreTrainingOutput(ModelOutput): + """ + Output type of [`BertForPreTraining`]. + + Args: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + Total loss as the sum of the masked language modeling loss and the next sequence prediction + (classification) loss. + prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation + before SoftMax). 
+        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+            shape `(batch_size, sequence_length, hidden_size)`.
+
+            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+            heads.
+    """
+
+    loss: Optional[torch.FloatTensor] = None
+    prediction_logits: torch.FloatTensor = None
+    seq_relationship_logits: torch.FloatTensor = None
+    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+BERT_START_DOCSTRING = r"""
+
+    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+    etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+    and behavior.
+
+    Parameters:
+        config ([`BertConfig`]): Model configuration class with all the parameters of the model.
+            Initializing with a config file does not load the weights associated with the model, only the
+            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BERT_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `({0})`):
+            Indices of input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+            1]`:
+
+            - 0 corresponds to a *sentence A* token,
+            - 1 corresponds to a *sentence B* token.
+
+            [What are token type IDs?](../glossary#token-type-ids)
+        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
+            config.max_position_embeddings - 1]`.
+
+            [What are position IDs?](../glossary#position-ids)
+        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+            model's internal embedding lookup matrix.
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
+    BERT_START_DOCSTRING,
+)
+class JinaBertModel(JinaBertPreTrainedModel):
+    """
+
+    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
+    `add_cross_attention` set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass.
+    """
+
+    def __init__(self, config: JinaBertConfig, add_pooling_layer=True):
+        super().__init__(config)
+        self.config = config
+
+        self.emb_pooler = config.emb_pooler
+        self._name_or_path = config._name_or_path
+        if self.emb_pooler:
+            from transformers import AutoTokenizer
+
+            self.tokenizer = AutoTokenizer.from_pretrained(config._name_or_path)
+
+        self.embeddings = JinaBertEmbeddings(config)
+        self.encoder = JinaBertEncoder(config)
+
+        self.pooler = JinaBertPooler(config) if add_pooling_layer else None
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @torch.inference_mode()
+    def encode(
+        self: 'JinaBertModel',
+        sentences: Union[str, List[str]],
+        batch_size: int = 32,
+        show_progress_bar: Optional[bool] = None,
+        output_value: str = 'sentence_embedding',
+        convert_to_numpy: bool = True,
+        convert_to_tensor: bool = False,
+        device: Optional[torch.device] = None,
+        normalize_embeddings: bool = False,
+        **tokenizer_kwargs,
+    ) -> Union[List[torch.Tensor], np.ndarray, torch.Tensor]:
+        """
+        Computes sentence embeddings
+
+        Args:
+            sentences(`str` or `List[str]`):
+                Sentence or sentences to be encoded
+            batch_size(`int`, *optional*, defaults to 32):
+                Batch size for the computation
+            show_progress_bar(`bool`, *optional*, defaults to None):
+                Show a progress bar when encoding sentences.
+                If set to None, progress bar is only shown when `logger.level == logging.INFO` or `logger.level == logging.DEBUG`.
+            output_value(`str`, *optional*, defaults to 'sentence_embedding'):
+                Default is `sentence_embedding`, which returns sentence embeddings.
+                Can be set to token_embeddings to get wordpiece token embeddings.
+ Set to None, to get all output values + convert_to_numpy(`bool`, *optional*, defaults to True): + If true, the output is a list of numpy vectors. + Else, it is a list of pytorch tensors. + convert_to_tensor(`bool`, *optional*, defaults to False): + If true, you get one large tensor as return. + Overwrites any setting from convert_to_numpy + device(`torch.device`, *optional*, defaults to None): + Which torch.device to use for the computation + normalize_embeddings(`bool`, *optional*, defaults to False): + If set to true, returned vectors will have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used. + tokenizer_kwargs(`Dict[str, Any]`, *optional*, defaults to {}): + Keyword arguments for the tokenizer + + Returns: + By default, a list of tensors is returned. + If convert_to_tensor, a stacked tensor is returned. + If convert_to_numpy, a numpy matrix is returned. + """ + if not self.emb_pooler: + warnings.warn("No emb_pooler specified, defaulting to mean pooling.") + self.emb_pooler = 'mean' + from transformers import AutoTokenizer + + self.tokenizer = AutoTokenizer.from_pretrained(self._name_or_path) + is_training = self.training + self.eval() + + if show_progress_bar is None: + show_progress_bar = ( + logger.getEffectiveLevel() == logging.INFO + or logger.getEffectiveLevel() == logging.DEBUG + ) + + if convert_to_tensor: + convert_to_numpy = False + + if output_value != 'sentence_embedding': + convert_to_tensor = False + convert_to_numpy = False + + input_was_string = False + if isinstance(sentences, str) or not hasattr(sentences, '__len__'): + sentences = [sentences] + input_was_string = True + + if device is not None: + self.to(device) + + # TODO: Maybe use better length heuristic? + permutation = np.argsort([-len(i) for i in sentences]) + inverse_permutation = np.argsort(permutation) + sentences = [sentences[idx] for idx in permutation] + + tokenizer_kwargs['padding'] = tokenizer_kwargs.get('padding', True) + tokenizer_kwargs['max_length'] = tokenizer_kwargs.get('max_length', 8192) + tokenizer_kwargs['truncation'] = tokenizer_kwargs.get('truncation', True) + + all_embeddings = [] + + if has_tqdm: + range_iter = trange( + 0, + len(sentences), + batch_size, + desc="Encoding", + disable=not show_progress_bar, + ) + else: + range_iter = range(0, len(sentences), batch_size) + + for i in range_iter: + encoded_input = self.tokenizer( + sentences[i : i + batch_size], + return_tensors='pt', + **tokenizer_kwargs, + ).to(self.device) + token_embs = self.forward(**encoded_input)[0] + + # Accumulate in fp32 to avoid overflow + token_embs = token_embs.float() + + if output_value == 'token_embeddings': + raise NotImplementedError + elif output_value is None: + raise NotImplementedError + else: + embeddings = self.mean_pooling( + token_embs, encoded_input['attention_mask'] + ) + + if normalize_embeddings: + embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) + + if convert_to_numpy: + embeddings = embeddings.cpu() + all_embeddings.extend(embeddings) + + all_embeddings = [all_embeddings[idx] for idx in inverse_permutation] + + if convert_to_tensor: + all_embeddings = torch.stack(all_embeddings) + elif convert_to_numpy: + all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings]) + + if input_was_string: + all_embeddings = all_embeddings[0] + + self.train(is_training) + return all_embeddings + + def mean_pooling( + self, token_embeddings: torch.Tensor, attention_mask: torch.Tensor + ): + input_mask_expanded = ( + 
attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() + ) + return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp( + input_mask_expanded.sum(1), min=1e-9 + ) + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
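+
+        Example (a minimal usage sketch, assuming the `jinaai/jina-embeddings-v2-base-en`
+        checkpoint whose config this repository vendors):
+
+        ```python
+        >>> from transformers import AutoModel, AutoTokenizer
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("jinaai/jina-embeddings-v2-base-en")
+        >>> model = AutoModel.from_pretrained("jinaai/jina-embeddings-v2-base-en", trust_remote_code=True)
+        >>> inputs = tokenizer("ALiBi lets this encoder handle long inputs.", return_tensors="pt")
+        >>> outputs = model(**inputs)
+        >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)
+        ```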
+ """ + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time" + ) + elif input_ids is not None: + # self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = ( + past_key_values[0][0].shape[2] if past_key_values is not None else 0 + ) + + if attention_mask is None: + attention_mask = torch.ones( + ((batch_size, seq_length + past_key_values_length)), device=device + ) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand( + batch_size, seq_length + ) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros( + input_shape, dtype=torch.long, device=device + ) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
+            attention_mask, input_shape
+        )
+
+        # If a 2D or 3D attention mask is provided for the cross-attention
+        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+        if self.config.is_decoder and encoder_hidden_states is not None:
+            (
+                encoder_batch_size,
+                encoder_sequence_length,
+                _,
+            ) = encoder_hidden_states.size()
+            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+            if encoder_attention_mask is None:
+                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+            encoder_extended_attention_mask = self.invert_attention_mask(
+                encoder_attention_mask
+            )
+        else:
+            encoder_extended_attention_mask = None
+
+        # Prepare head mask if needed
+        # 1.0 in head_mask indicates we keep the head
+        # attention_probs has shape bsz x n_heads x N x N
+        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+        embedding_output = self.embeddings(
+            input_ids=input_ids,
+            position_ids=position_ids,
+            token_type_ids=token_type_ids,
+            inputs_embeds=inputs_embeds,
+            past_key_values_length=past_key_values_length,
+        )
+        encoder_outputs = self.encoder(
+            embedding_output,
+            attention_mask=extended_attention_mask,
+            head_mask=head_mask,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_extended_attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        sequence_output = encoder_outputs[0]
+        pooled_output = (
+            self.pooler(sequence_output) if self.pooler is not None else None
+        )
+
+        if not return_dict:
+            return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+        return BaseModelOutputWithPoolingAndCrossAttentions(
+            last_hidden_state=sequence_output,
+            pooler_output=pooled_output,
+            past_key_values=encoder_outputs.past_key_values,
+            hidden_states=encoder_outputs.hidden_states,
+            attentions=encoder_outputs.attentions,
+            cross_attentions=encoder_outputs.cross_attentions,
+        )
+
+
+@add_start_docstrings(
+    """
+    Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
+    sentence prediction (classification)` head.
+ """, + BERT_START_DOCSTRING, +) +class JinaBertForPreTraining(JinaBertPreTrainedModel): + _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"] + + def __init__(self, config): + super().__init__(config) + + self.bert = JinaBertModel(config) + self.cls = JinaBertPreTrainingHeads(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @replace_return_docstrings( + output_type=JinaBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + next_sentence_label: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], JinaBertForPreTrainingOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), + the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence + pair (see `input_ids` docstring) Indices should be in `[0, 1]`: + + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + kwargs (`Dict[str, any]`, optional, defaults to *{}*): + Used to hide legacy arguments that have been deprecated. 
+ + Returns: + """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output, pooled_output = outputs[:2] + prediction_scores, seq_relationship_score = self.cls( + sequence_output, pooled_output + ) + + total_loss = None + if labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct( + prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) + ) + next_sentence_loss = loss_fct( + seq_relationship_score.view(-1, 2), next_sentence_label.view(-1) + ) + total_loss = masked_lm_loss + next_sentence_loss + + if not return_dict: + output = (prediction_scores, seq_relationship_score) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return JinaBertForPreTrainingOutput( + loss=total_loss, + prediction_logits=prediction_scores, + seq_relationship_logits=seq_relationship_score, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """JinaBert Model with a `language modeling` head on top for CLM fine-tuning.""", + BERT_START_DOCSTRING, +) +class JinaBertLMHeadModel(JinaBertPreTrainedModel): + _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"] + + def __init__(self, config): + super().__init__(config) + + if not config.is_decoder: + logger.warning( + "If you want to use `JinaBertLMHeadModel` as a standalone, add `is_decoder=True.`" + ) + + self.bert = JinaBertModel(config, add_pooling_layer=False) + self.cls = JinaBertOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=CausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.Tensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. 
+        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
+            ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+        use_cache (`bool`, *optional*):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`).
+        """
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+        if labels is not None:
+            use_cache = False
+
+        outputs = self.bert(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        sequence_output = outputs[0]
+        prediction_scores = self.cls(sequence_output)
+
+        lm_loss = None
+        if labels is not None:
+            # we are doing next-token prediction; shift prediction scores and input ids by one
+            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
+            labels = labels[:, 1:].contiguous()
+            loss_fct = CrossEntropyLoss()
+            lm_loss = loss_fct(
+                shifted_prediction_scores.view(-1, self.config.vocab_size),
+                labels.view(-1),
+            )
+
+        if not return_dict:
+            output = (prediction_scores,) + outputs[2:]
+            return ((lm_loss,) + output) if lm_loss is not None else output
+
+        return CausalLMOutputWithCrossAttentions(
+            loss=lm_loss,
+            logits=prediction_scores,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+            cross_attentions=outputs.cross_attentions,
+        )
+
+    def prepare_inputs_for_generation(
+        self,
+        input_ids,
+        past_key_values=None,
+        attention_mask=None,
+        use_cache=True,
+        **model_kwargs,
+    ):
+        input_shape = input_ids.shape
+        # if the model is used as a decoder in an encoder-decoder model, the decoder attention mask is created on the fly
+        if attention_mask is None:
+            attention_mask = input_ids.new_ones(input_shape)
+
+        # cut decoder_input_ids if past_key_values is used
+        if past_key_values is not None:
+            input_ids = input_ids[:, -1:]
+
+        return {
+            "input_ids": input_ids,
+            "attention_mask":
attention_mask, + "past_key_values": past_key_values, + "use_cache": use_cache, + } + + def _reorder_cache(self, past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple( + past_state.index_select(0, beam_idx) for past_state in layer_past + ), + ) + return reordered_past + + +@add_start_docstrings( + """JinaBert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING +) +class JinaBertForMaskedLM(JinaBertPreTrainedModel): + _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"] + + def __init__(self, config): + super().__init__(config) + + if config.is_decoder: + logger.warning( + "If you want to use `JinaBertForMaskedLM` make sure `config.is_decoder=False` for " + "bi-directional self-attention." + ) + + self.bert = JinaBertModel(config, add_pooling_layer=False) + self.cls = JinaBertOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="'paris'", + expected_loss=0.88, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + """ + + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct( + prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) + ) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ( + ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + ) + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, attention_mask=None, **model_kwargs + ): + input_shape = input_ids.shape + effective_batch_size = input_shape[0] + + # add a dummy token + if self.config.pad_token_id is None: + raise ValueError("The PAD token should be defined for generation") + + attention_mask = torch.cat( + [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], + dim=-1, + ) + dummy_token = torch.full( + (effective_batch_size, 1), + self.config.pad_token_id, + dtype=torch.long, + device=input_ids.device, + ) + input_ids = torch.cat([input_ids, dummy_token], dim=1) + + return {"input_ids": input_ids, "attention_mask": attention_mask} + + +@add_start_docstrings( + """JinaBert Model with a `next sentence prediction (classification)` head on top.""", + BERT_START_DOCSTRING, +) +class JinaBertForNextSentencePrediction(JinaBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.bert = JinaBertModel(config) + self.cls = JinaBertOnlyNSPHead(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @replace_return_docstrings( + output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair + (see `input_ids` docstring). 
Indices should be in `[0, 1]`: + + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + + Returns: + """ + + if "next_sentence_label" in kwargs: + warnings.warn( + "The `next_sentence_label` argument is deprecated and will be removed in a future version, use" + " `labels` instead.", + FutureWarning, + ) + labels = kwargs.pop("next_sentence_label") + + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + seq_relationship_scores = self.cls(pooled_output) + + next_sentence_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + next_sentence_loss = loss_fct( + seq_relationship_scores.view(-1, 2), labels.view(-1) + ) + + if not return_dict: + output = (seq_relationship_scores,) + outputs[2:] + return ( + ((next_sentence_loss,) + output) + if next_sentence_loss is not None + else output + ) + + return NextSentencePredictorOutput( + loss=next_sentence_loss, + logits=seq_relationship_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + JinaBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled + output) e.g. for GLUE tasks. + """, + BERT_START_DOCSTRING, +) +class JinaBertForSequenceClassification(JinaBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + + self.bert = JinaBertModel(config) + classifier_dropout = ( + config.classifier_dropout + if config.classifier_dropout is not None + else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, + expected_loss=_SEQ_CLASS_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
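+
+        Example (a fine-tuning sketch; the checkpoint name and `num_labels` are
+        illustrative, and the freshly initialized classifier head needs training
+        before its logits are meaningful):
+
+        ```python
+        >>> import torch
+        >>> from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("jinaai/jina-embeddings-v2-base-en")
+        >>> model = AutoModelForSequenceClassification.from_pretrained(
+        ...     "jinaai/jina-embeddings-v2-base-en", num_labels=2, trust_remote_code=True
+        ... )
+        >>> inputs = tokenizer("A sentence to classify.", return_tensors="pt")
+        >>> outputs = model(**inputs, labels=torch.tensor([1]))
+        >>> outputs.loss, outputs.logits.shape  # scalar loss and (1, num_labels)
+        ```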
+ """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and ( + labels.dtype == torch.long or labels.dtype == torch.int + ): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + JinaBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. + """, + BERT_START_DOCSTRING, +) +class JinaBertForMultipleChoice(JinaBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.bert = JinaBertModel(config) + classifier_dropout = ( + config.classifier_dropout + if config.classifier_dropout is not None + else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, 1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., + num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See + `input_ids` above) + """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + num_choices = ( + input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + ) + + input_ids = ( + input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + ) + attention_mask = ( + attention_mask.view(-1, attention_mask.size(-1)) + if attention_mask is not None + else None + ) + token_type_ids = ( + token_type_ids.view(-1, token_type_ids.size(-1)) + if token_type_ids is not None + else None + ) + position_ids = ( + position_ids.view(-1, position_ids.size(-1)) + if position_ids is not None + else None + ) + inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + JinaBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. + """, + BERT_START_DOCSTRING, +) +class JinaBertForTokenClassification(JinaBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = JinaBertModel(config, add_pooling_layer=False) + classifier_dropout = ( + config.classifier_dropout + if config.classifier_dropout is not None + else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, + expected_loss=_TOKEN_CLASS_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
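+
+        Example (an illustrative sketch; the checkpoint and label count are
+        assumptions, and the token-classification head starts untrained):
+
+        ```python
+        >>> from transformers import AutoTokenizer
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("jinaai/jina-embeddings-v2-base-en")
+        >>> model = JinaBertForTokenClassification.from_pretrained(
+        ...     "jinaai/jina-embeddings-v2-base-en", num_labels=9
+        ... )
+        >>> inputs = tokenizer("Jina AI is based in Berlin.", return_tensors="pt")
+        >>> logits = model(**inputs).logits  # (batch_size, sequence_length, num_labels)
+        >>> predicted_class_ids = logits.argmax(dim=-1)
+        ```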
+ """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + JinaBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + BERT_START_DOCSTRING, +) +class JinaBertForQuestionAnswering(JinaBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = JinaBertModel(config, add_pooling_layer=False) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_QA, + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + qa_target_start_index=_QA_TARGET_START_INDEX, + qa_target_end_index=_QA_TARGET_END_INDEX, + expected_output=_QA_EXPECTED_OUTPUT, + expected_loss=_QA_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + start_positions: Optional[torch.Tensor] = None, + end_positions: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + diff --git a/models/models--jinaai--jina-embeddings-v2-base-en/blobs/6b70f1386f05b9703ea4edf7f1550a8925399f9580e4cc754cc099efc1e736d8 b/models/models--jinaai--jina-embeddings-v2-base-en/blobs/6b70f1386f05b9703ea4edf7f1550a8925399f9580e4cc754cc099efc1e736d8 new file mode 100644 index 0000000000000000000000000000000000000000..28b58d88034764895f18c6e9c2834d6e2fdf1254 --- /dev/null +++ b/models/models--jinaai--jina-embeddings-v2-base-en/blobs/6b70f1386f05b9703ea4edf7f1550a8925399f9580e4cc754cc099efc1e736d8 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b70f1386f05b9703ea4edf7f1550a8925399f9580e4cc754cc099efc1e736d8 +size 274757256 diff --git a/models/models--jinaai--jina-embeddings-v2-base-en/blobs/d0dfed01bd337522ff8f838b48ebc4c1d15f8c12 b/models/models--jinaai--jina-embeddings-v2-base-en/blobs/d0dfed01bd337522ff8f838b48ebc4c1d15f8c12 new file mode 100644 index 0000000000000000000000000000000000000000..d0dfed01bd337522ff8f838b48ebc4c1d15f8c12 --- /dev/null +++ b/models/models--jinaai--jina-embeddings-v2-base-en/blobs/d0dfed01bd337522ff8f838b48ebc4c1d15f8c12 @@ -0,0 +1,35 @@ +{ + "_name_or_path": "jinaai/jina-bert-implementation", + "model_max_length": 8192, + "architectures": [ + "JinaBertForMaskedLM" + ], + "attention_probs_dropout_prob": 0.0, + "auto_map": { + "AutoConfig": "jinaai/jina-bert-implementation--configuration_bert.JinaBertConfig", + "AutoModelForMaskedLM": "jinaai/jina-bert-implementation--modeling_bert.JinaBertForMaskedLM", + "AutoModel": "jinaai/jina-bert-implementation--modeling_bert.JinaBertModel", + "AutoModelForSequenceClassification": "jinaai/jina-bert-implementation--modeling_bert.JinaBertForSequenceClassification" + }, + "classifier_dropout": null, + "gradient_checkpointing": false, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + 
"hidden_size": 768, + "initializer_range": 0.02, + "intermediate_size": 3072, + "layer_norm_eps": 1e-12, + "max_position_embeddings": 8192, + "model_type": "bert", + "num_attention_heads": 12, + "num_hidden_layers": 12, + "pad_token_id": 0, + "position_embedding_type": "alibi", + "torch_dtype": "float32", + "transformers_version": "4.26.0", + "type_vocab_size": 2, + "use_cache": true, + "vocab_size": 30528, + "feed_forward_type": "geglu", + "emb_pooler": "mean" +} diff --git a/models/models--jinaai--jina-embeddings-v2-base-en/refs/main b/models/models--jinaai--jina-embeddings-v2-base-en/refs/main new file mode 100644 index 0000000000000000000000000000000000000000..80426def486adb20c73acce342af056332aeef7e --- /dev/null +++ b/models/models--jinaai--jina-embeddings-v2-base-en/refs/main @@ -0,0 +1 @@ +f84569a8c478c7bb2ea3bc187034859870940deb \ No newline at end of file diff --git a/models/models--jinaai--jina-embeddings-v2-base-en/snapshots/f84569a8c478c7bb2ea3bc187034859870940deb/config.json b/models/models--jinaai--jina-embeddings-v2-base-en/snapshots/f84569a8c478c7bb2ea3bc187034859870940deb/config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0dfed01bd337522ff8f838b48ebc4c1d15f8c12 --- /dev/null +++ b/models/models--jinaai--jina-embeddings-v2-base-en/snapshots/f84569a8c478c7bb2ea3bc187034859870940deb/config.json @@ -0,0 +1,35 @@ +{ + "_name_or_path": "jinaai/jina-bert-implementation", + "model_max_length": 8192, + "architectures": [ + "JinaBertForMaskedLM" + ], + "attention_probs_dropout_prob": 0.0, + "auto_map": { + "AutoConfig": "jinaai/jina-bert-implementation--configuration_bert.JinaBertConfig", + "AutoModelForMaskedLM": "jinaai/jina-bert-implementation--modeling_bert.JinaBertForMaskedLM", + "AutoModel": "jinaai/jina-bert-implementation--modeling_bert.JinaBertModel", + "AutoModelForSequenceClassification": "jinaai/jina-bert-implementation--modeling_bert.JinaBertForSequenceClassification" + }, + "classifier_dropout": null, + "gradient_checkpointing": false, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 768, + "initializer_range": 0.02, + "intermediate_size": 3072, + "layer_norm_eps": 1e-12, + "max_position_embeddings": 8192, + "model_type": "bert", + "num_attention_heads": 12, + "num_hidden_layers": 12, + "pad_token_id": 0, + "position_embedding_type": "alibi", + "torch_dtype": "float32", + "transformers_version": "4.26.0", + "type_vocab_size": 2, + "use_cache": true, + "vocab_size": 30528, + "feed_forward_type": "geglu", + "emb_pooler": "mean" +} diff --git a/models/models--jinaai--jina-embeddings-v2-base-en/snapshots/f84569a8c478c7bb2ea3bc187034859870940deb/model.safetensors b/models/models--jinaai--jina-embeddings-v2-base-en/snapshots/f84569a8c478c7bb2ea3bc187034859870940deb/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..28b58d88034764895f18c6e9c2834d6e2fdf1254 --- /dev/null +++ b/models/models--jinaai--jina-embeddings-v2-base-en/snapshots/f84569a8c478c7bb2ea3bc187034859870940deb/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b70f1386f05b9703ea4edf7f1550a8925399f9580e4cc754cc099efc1e736d8 +size 274757256 diff --git a/users/__pycache__/__init__.cpython-311.pyc b/users/__pycache__/__init__.cpython-311.pyc index 125c48a70a0e40c372bc5d3d5a6af06900de454b..c829522a70bf957cfa5edc92709b21d0a5d996a7 100644 Binary files a/users/__pycache__/__init__.cpython-311.pyc and b/users/__pycache__/__init__.cpython-311.pyc differ diff --git 
diff --git a/users/__pycache__/admin.cpython-311.pyc b/users/__pycache__/admin.cpython-311.pyc
index c06bd24d0f3035a4e93b4db67d96cd7d9667704c..e813cc61a27a95b0ed0cc2d83303f2a5edfce255 100644
Binary files a/users/__pycache__/admin.cpython-311.pyc and b/users/__pycache__/admin.cpython-311.pyc differ
diff --git a/users/__pycache__/apps.cpython-311.pyc b/users/__pycache__/apps.cpython-311.pyc
index 4e6c4d3a9c9d4ccba4593a5a9bdaf74b5e571130..c737d8600d799562a335700627153048cfd075b4 100644
Binary files a/users/__pycache__/apps.cpython-311.pyc and b/users/__pycache__/apps.cpython-311.pyc differ
diff --git a/users/__pycache__/urls.cpython-311.pyc b/users/__pycache__/urls.cpython-311.pyc
index 45aca2165aea7e7fcaae1ff0147a9de92021e834..85cfa7291273fed8a4b0450331f34eee2f6c58d7 100644
Binary files a/users/__pycache__/urls.cpython-311.pyc and b/users/__pycache__/urls.cpython-311.pyc differ
diff --git a/users/__pycache__/views.cpython-311.pyc b/users/__pycache__/views.cpython-311.pyc
index 7d1aeeab4d05826cb42febc657ce76714d6c5570..3c1bad7e38f04330c9a4be50cca54af50b7bf97d 100644
Binary files a/users/__pycache__/views.cpython-311.pyc and b/users/__pycache__/views.cpython-311.pyc differ
diff --git a/users/admin.py b/users/admin.py
index 8c38f3f3dad51e4585f3984282c2a4bec5349c1e..ea5d68b7c457cb7f92da9c00a5c4df77ace36cef 100644
--- a/users/admin.py
+++ b/users/admin.py
@@ -1,3 +1,3 @@
-from django.contrib import admin
-
-# Register your models here.
+from django.contrib import admin
+
+# Register your models here.
diff --git a/users/apps.py b/users/apps.py
index 72b1401065bff121d2e3c31231a5ad2dd5239171..80638568b41b1541d6dd3b22afb2139dbec4a8b1 100644
--- a/users/apps.py
+++ b/users/apps.py
@@ -1,6 +1,6 @@
-from django.apps import AppConfig
-
-
-class UsersConfig(AppConfig):
-    default_auto_field = 'django.db.models.BigAutoField'
-    name = 'users'
+from django.apps import AppConfig
+
+
+class UsersConfig(AppConfig):
+    default_auto_field = 'django.db.models.BigAutoField'
+    name = 'users'
diff --git a/users/migrations/__pycache__/__init__.cpython-311.pyc b/users/migrations/__pycache__/__init__.cpython-311.pyc
index 44a8a49af7ba29061cd5a9fc5fac044e792c440b..d2545568669baf319a9f687dfa92276a675a8600 100644
Binary files a/users/migrations/__pycache__/__init__.cpython-311.pyc and b/users/migrations/__pycache__/__init__.cpython-311.pyc differ
diff --git a/users/templates/users/login.html b/users/templates/users/login.html
index ad6922cd38237066b5322860d246b169000ebbf3..4f9a6567eb61a5584f9e2c66320cc95ee0ae8161 100644
--- a/users/templates/users/login.html
+++ b/users/templates/users/login.html
@@ -1,73 +1,73 @@
[All 73 lines of the "User Login" template were removed and re-added unchanged (line-ending normalization). The page renders a POST form with {% csrf_token %}, {{ form }}, a hidden "next" input when request.GET.next is set, and a submit button; the surrounding HTML markup was lost in extraction.]
diff --git a/users/templates/users/register.html b/users/templates/users/register.html
index 4c13690c77cfbf105acc3e954a9e5cfebb4c092b..ef1be7c45fe01aa6f93923f9616a502aaeb825d1 100644
--- a/users/templates/users/register.html
+++ b/users/templates/users/register.html
@@ -1,88 +1,88 @@
[All 88 lines of the "Register a New User" template were removed and re-added unchanged (line-ending normalization). Inside {% block content %} the page renders a POST form with {% csrf_token %} and the {{ form.username }}, {{ form.password1 }}, and {{ form.password2 }} fields; the surrounding HTML markup was lost in extraction.]
diff --git a/users/tests.py b/users/tests.py
index 7ce503c2dd97ba78597f6ff6e4393132753573f6..de8bdc00eb2fed53494a534d48e400faa830dbd9 100644
--- a/users/tests.py
+++ b/users/tests.py
@@ -1,3 +1,3 @@
-from django.test import TestCase
-
-# Create your tests here.
+from django.test import TestCase
+
+# Create your tests here.
diff --git a/users/urls.py b/users/urls.py
index 4a8a8bf3f9e0cdc89e554051699e8da29fc15a42..eccf5a6f1a203db0cabcdda1bbfbad4bb93a6d87 100644
--- a/users/urls.py
+++ b/users/urls.py
@@ -1,11 +1,11 @@
-from django.urls import path
-from . import views
-
-app_name = 'users'
-
-urlpatterns = [
-    path('register/', views.register_view, name="register"),
-    path('login/', views.login_view, name="login"),
-    path('logout/', views.logout_view, name="logout"),
-    # path('', views.index_view, name = "index")
-]
+from django.urls import path
+from . import views
+
+app_name = 'users'
+
+urlpatterns = [
+    path('register/', views.register_view, name="register"),
+    path('login/', views.login_view, name="login"),
+    path('logout/', views.logout_view, name="logout"),
+    # path('', views.index_view, name = "index")
+]
diff --git a/users/views.py b/users/views.py
index c7321bf83269b92d67767567f703ccf3c2c89e14..2686f91d0d3ccfb341b3dd749463e2bd66a1bff6 100644
--- a/users/views.py
+++ b/users/views.py
@@ -1,39 +1,39 @@
-from django.shortcuts import render, redirect
-from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
-from django.contrib.auth import login, logout
-from chat.models import Chat, User
-# Create your views here.
-def register_view(request):
-    if request.method == "POST":
-        form = UserCreationForm(request.POST)
-        if form.is_valid():
-            login(request, form.save())
-            return redirect("chat:room")
-    else:
-        form = UserCreationForm()
-    return render(request, "users/register.html", { "form": form })
-
-def login_view(request):
-    if request.user.is_authenticated:
-        return redirect("chat:room")
-    if request.method == "POST":
-        form = AuthenticationForm(data=request.POST)
-        if form.is_valid():
-            login(request, form.get_user())
-            #Get chat history of user
-
-            if 'next' in request.POST:
-                return redirect(request.POST.get('next'))
-            else:
-                return redirect("chat:room")
-    else:
-        form = AuthenticationForm()
-    return render(request, "users/login.html", { "form": form })
-
-def logout_view(request):
-    if request.method == "POST":
-        logout(request)
-    return redirect("chat:room")
-
-def index_view(request):
+from django.shortcuts import render, redirect
+from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
+from django.contrib.auth import login, logout
+# from chat.models import Chat, User
+# Create your views here.
+def register_view(request):
+    if request.method == "POST":
+        form = UserCreationForm(request.POST)
+        if form.is_valid():
+            login(request, form.save())
+            return redirect("chat:room")
+    else:
+        form = UserCreationForm()
+    return render(request, "users/register.html", { "form": form })
+
+def login_view(request):
+    if request.user.is_authenticated:
+        # Redirect via the named route, not a raw template path
+        return redirect("chat:room")
+    if request.method == "POST":
+        form = AuthenticationForm(data=request.POST)
+        if form.is_valid():
+            login(request, form.get_user())
+            # Get chat history of user
+
+            if 'next' in request.POST:
+                return redirect(request.POST.get('next'))
+            else:
+                # Reverse by URL name; a dotted view path is not resolvable here
+                return redirect("chat:room")
+    else:
+        form = AuthenticationForm()
+    return render(request, "users/login.html", { "form": form })
+
+def logout_view(request):
+    if request.method == "POST":
+        logout(request)
+    return redirect("chat:room")
+
+def index_view(request):
     return render(request, "users/index.html")
\ No newline at end of file
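Every redirect("chat:room") above reverses a namespaced URL name, so it assumes the project registers a chat app analogous to users/urls.py. A hypothetical chat/urls.py consistent with that convention (not part of this diff; the room view name is an assumption drawn from the redirects):

# Hypothetical chat/urls.py: the "chat:room" redirects in users/views.py
# reverse a route registered like this.
from django.urls import path
from . import views

app_name = 'chat'

urlpatterns = [
    path('', views.room, name="room"),  # reversed as "chat:room"
]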