howey committed
Commit
9b55278
1 Parent(s): 489cd12

remove unrelated files

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +0 -104
  2. .gitignore +0 -162
  3. BENCHMARKING.md +0 -190
  4. LICENSE +0 -21
  5. LICENSE.md +0 -201
  6. README.md +1 -96
  7. adapter_fusion.py +0 -105
  8. benchmark.sh +0 -2
  9. bert_pals.py +0 -861
  10. evaluation/EVALUATION.md +0 -131
  11. evaluation/INFERENCE.md +0 -98
  12. evaluation/embeddings_generator.py +0 -53
  13. evaluation/encoders.py +0 -320
  14. evaluation/eval_datasets.py +0 -96
  15. evaluation/evaluator.py +0 -228
  16. evaluation/few_shot_evaluator.py +0 -58
  17. evaluation/gpt3_encoder.py +0 -30
  18. evaluation/instructor.py +0 -25
  19. examples/classification.py +0 -24
  20. examples/fewshot_classification.py +0 -23
  21. examples/regression.py +0 -23
  22. examples/retrieval.py +0 -39
  23. full_scirepeval_tasks.jsonl +0 -17
  24. htrans/__init__.py +0 -0
  25. htrans/act_fns.py +0 -205
  26. htrans/embedding.py +0 -272
  27. htrans/model/__init__.py +0 -2
  28. htrans/model/configuration_htrans.py +0 -130
  29. htrans/model/modeling_htrans.py +0 -1283
  30. htrans/norms.py +0 -52
  31. htrans/pytorch_utils.py +0 -276
  32. mdcr.py +0 -58
  33. requirements.txt +0 -100
  34. reviewer_matching.py +0 -65
  35. s2and_embeddings.py +0 -56
  36. scirepeval.py +0 -159
  37. scirepeval_tasks.jsonl +0 -22
  38. super_scirep.jsonl +0 -16
  39. training/TRAINING.md +0 -138
  40. training/bert_pals_config/low_rank_config.json +0 -15
  41. training/bert_pals_config/pals.config.json +0 -16
  42. training/mtl_datasets.py +0 -311
  43. training/pl_training.py +0 -325
  44. training/sample_data/fos_labels.txt +0 -23
  45. training/sample_data/fos_small.json +0 -0
  46. training/sample_data/mesh_descriptors.txt +0 -30
  47. training/sample_data/mesh_small.json +0 -0
  48. training/sample_data/s2and_small.json +0 -0
  49. training/sample_data/search_small.jsonl +0 -0
  50. training/sample_data/specter_small.json +0 -0
.gitattributes DELETED
@@ -1,104 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
- feeds_m/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- fos/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- paper_reviewer_matching/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- search/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- tweet_mentions/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- cite_count/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- feeds_1/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- feeds_title/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- high_influence_cite/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- peer_review_score_hIndex/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- pub_year/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- scidocs_view_cite_read/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
- search/validation/state.json filter=lfs diff=lfs merge=lfs -text
- cite_count/validation/state.json filter=lfs diff=lfs merge=lfs -text
- feeds_title/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
- fos/validation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- pub_year/validation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- pub_year/validation/state.json filter=lfs diff=lfs merge=lfs -text
- feeds_m/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- fos/train/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- fos/validation/state.json filter=lfs diff=lfs merge=lfs -text
- search/validation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- feeds_m/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
- pub_year/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
- search/train/state.json filter=lfs diff=lfs merge=lfs -text
- tweet_mentions/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
- feeds_1/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
- fos/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
- fos/train/state.json filter=lfs diff=lfs merge=lfs -text
- paper_reviewer_matching/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- pub_year/train/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- tweet_mentions/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- cite_count/train/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- peer_review_score_hIndex/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
- pub_year/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- search/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
- search/train/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- peer_review_score_hIndex/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- pub_year/train/state.json filter=lfs diff=lfs merge=lfs -text
- cite_count/train/state.json filter=lfs diff=lfs merge=lfs -text
- cite_count/validation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- feeds_1/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- feeds_title/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- fos/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- cite_count/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- cite_count/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
- paper_reviewer_matching/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
- scidocs_view_cite_read/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
- scidocs_view_cite_read/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
- search/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
.gitignore DELETED
@@ -1,162 +0,0 @@
- # Byte-compiled / optimized / DLL files
- __pycache__/
- *.py[cod]
- *$py.class
-
- # C extensions
- *.so
-
- # Distribution / packaging
- .Python
- build/
- develop-eggs/
- dist/
- downloads/
- eggs/
- .eggs/
- lib/
- lib64/
- parts/
- sdist/
- var/
- wheels/
- share/python-wheels/
- *.egg-info/
- .installed.cfg
- *.egg
- MANIFEST
-
- # PyInstaller
- # Usually these files are written by a python script from a template
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
- *.manifest
- *.spec
-
- # Installer logs
- pip-log.txt
- pip-delete-this-directory.txt
-
- # Unit test / coverage reports
- htmlcov/
- .tox/
- .nox/
- .coverage
- .coverage.*
- .cache
- nosetests.xml
- coverage.xml
- *.cover
- *.py,cover
- .hypothesis/
- .pytest_cache/
- cover/
-
- # Translations
- *.mo
- *.pot
-
- # Django stuff:
- *.log
- local_settings.py
- db.sqlite3
- db.sqlite3-journal
-
- # Flask stuff:
- instance/
- .webassets-cache
-
- # Scrapy stuff:
- .scrapy
-
- # Sphinx documentation
- docs/_build/
-
- # PyBuilder
- .pybuilder/
- target/
-
- # Jupyter Notebook
- .ipynb_checkpoints
-
- # IPython
- profile_default/
- ipython_config.py
-
- # pyenv
- # For a library or package, you might want to ignore these files since the code is
- # intended to run in multiple environments; otherwise, check them in:
- # .python-version
-
- # pipenv
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
- # install all needed dependencies.
- #Pipfile.lock
-
- # poetry
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
- # This is especially recommended for binary packages to ensure reproducibility, and is more
- # commonly ignored for libraries.
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
- #poetry.lock
-
- # pdm
- # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
- #pdm.lock
- # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
- # in version control.
- # https://pdm.fming.dev/#use-with-ide
- .pdm.toml
-
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
- __pypackages__/
-
- # Celery stuff
- celerybeat-schedule
- celerybeat.pid
-
- # SageMath parsed files
- *.sage.py
-
- # Environments
- .env
- .venv
- env/
- venv/
- ENV/
- env.bak/
- venv.bak/
-
- # Spyder project settings
- .spyderproject
- .spyproject
-
- # Rope project settings
- .ropeproject
-
- # mkdocs documentation
- /site
-
- # mypy
- .mypy_cache/
- .dmypy.json
- dmypy.json
-
- # Pyre type checker
- .pyre/
-
- # pytype static type analyzer
- .pytype/
-
- # Cython debug symbols
- cython_debug/
- huggingface_hub/
- # PyCharm
- # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
- # and can be added to the global gitignore or merged into this file. For a more nuclear
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
- .idea/
- results/
- data/
BENCHMARKING.md DELETED
@@ -1,190 +0,0 @@
- ## Benchmarking
- We provide our trained models on the HuggingFace models [hub](https://huggingface.co/models?search=scirepeval) to replicate the results in Table 2 from the paper.
-
- |Model|In-Train|Out-of-Train|SciDocs|Average|
- |--|--|--|--|--|
- |[SciBERT](https://huggingface.co/allenai/scibert_scivocab_uncased)|51.5|52.5|69.0|58.2|
- |[SPECTER](https://huggingface.co/allenai/specter)|54.7|57.4|89.1|68.0|
- |[SciNCL](https://huggingface.co/malteos/scincl)|55.6|57.8|**90.8**|69.0|
- |SciNCL + MTL CLS|60.1|56.6|89.6|69.3|
- |[SciNCL + MTL CTRL](https://huggingface.co/allenai/scirepeval_ctrl)|62.1|57.7|89.9|70.3|
- |[SciNCL PALs](https://huggingface.co/allenai/scirepeval_pals)|62.3|58.4|90.0|70.7|
- |SciNCL Adapters ([CLF](https://huggingface.co/allenai/scirepeval_adapters_clf), [QRY](https://huggingface.co/allenai/scirepeval_adapters_qry), [RGN](https://huggingface.co/allenai/scirepeval_adapters_rgn), [PRX](https://huggingface.co/allenai/scirepeval_adapters_prx))|61.9|**59.0**|90.3|70.9|
- |[SciNCL Adapters Fusion](https://us-east-1.console.aws.amazon.com/s3/buckets/ai2-s2-research-public?region=us-west-2&prefix=scirepeval/adapters/&showversions=false)|62.0|58.6|89.9|70.6|
- |SciNCL Adapters + MTL CTRL|**62.5**|58.9|**90.7**|**71.2**|
-
- The detailed, task-wise results for all our experiments can be found [here](https://docs.google.com/spreadsheets/d/1JMq-jR4M8KU119cvglUDmMwwzd60Z3vyvn3VqhPn9EY/view#gid=1450677429?usp=sharing).
-
- We provide a test script, [scirepeval.py](https://github.com/allenai/scirepeval/blob/main/scirepeval.py), to evaluate one of the above models or a custom-trained model on all the tasks in the benchmark.
- The tasks can be configured as required in [scirepeval_tasks.jsonl](https://github.com/allenai/scirepeval/blob/main/scirepeval_tasks.jsonl).
-
- The following are used as task ids in the code and serve as either control codes or module identifiers for each task type:
-
- ``TASK_IDS = {"classification": "[CLF]", "regression": "[RGN]", "proximity": "[PRX]",
- "adhoc_search": {"query": "[QRY]", "candidates": "[PRX]"}}``
-
- Execute one of the following commands to evaluate a model on SciRepEval:
- <a name="models"></a>
-
- **Base/MTL CLS**
- ```bash
- python scirepeval.py -m allenai/specter
- ```
- **MTL CTRL**
- ```bash
- python scirepeval.py -m allenai/scirepeval_ctrl --ctrl-tokens
- ```
- **PALs**
- ```bash
- python scirepeval.py --mtype pals -m allenai/scirepeval_pals
- ```
- **Adapters**
- ```bash
- python scirepeval.py --mtype adapters -m malteos/scincl --adapters-dir <local checkpoint directory with adapter module weights>
- OR
- python scirepeval.py --mtype adapters -m malteos/scincl --adapters-chkpt '{"[CLF]": "allenai/scirepeval_adapters_clf", "[QRY]": "allenai/scirepeval_adapters_qry", "[RGN]": "allenai/scirepeval_adapters_rgn", "[PRX]": "allenai/scirepeval_adapters_prx"}'
- ```
-
- **Fusion**
- ```bash
- python scirepeval.py --mtype fusion -m <huggingface base model name/local checkpoint path> --adapters-dir <local checkpoint directory with adapter module weights> --fusion-dir <local checkpoint directory with fusion module weights>
- OR
- python scirepeval.py --mtype fusion -m <huggingface base model name/local checkpoint path> --adapters-chkpt '{"[CLF]": "allenai/scirepeval_adapters_clf", "[QRY]": "allenai/scirepeval_adapters_qry", "[RGN]": "allenai/scirepeval_adapters_rgn", "[PRX]": "allenai/scirepeval_adapters_prx"}' --fusion-dir <local checkpoint directory with fusion module weights>
-
- ```
-
- **OpenAI Embeddings**
-
- We provide an additional option for evaluating [OpenAI](https://platform.openai.com/docs/guides/embeddings/use-cases) embeddings on SciRepEval.
- If you have an OpenAI API key, set it as an environment variable.
- ```bash
- export OPENAI_API_KEY=<open ai api key>
- python scirepeval.py --gpt3-model text-embedding-ada-002
- ```
-
- **Instructor**
-
- You can also evaluate the [Instructor](https://instructor-embedding.github.io/) models available on Hugging Face.
- The prompts for each task format are present in [instructor.py](https://github.com/allenai/scirepeval/blob/main/evaluation/instructor.py).
-
- ```bash
- python scirepeval.py --instructor -m hkunlp/instructor-large
- ```
-
- The script generates embeddings and evaluates on each task as per the metric mentioned in the paper. By default, the result report is created in `<ROOT>/scirepeval_results.json`.
-
- ### Sample Report
- ```json
- {
-   "Biomimicry": {
-     "complete": {
-       "f1": 71.18
-     },
-     "few_shot": [
-       {
-         "sample_size": 64,
-         "results": {
-           "f1": 38.514
-         }
-       },
-       {
-         "sample_size": 16,
-         "results": {
-           "f1": 22.3444
-         }
-       }
-     ]
-   },
-   "DRSM": {
-     "complete": {
-       "f1_macro": 76.36
-     },
-     "few_shot": [
-       {
-         "sample_size": 64,
-         "results": {
-           "f1_macro": 61.842000000000006
-         }
-       },
-       {
-         "sample_size": 24,
-         "results": {
-           "f1_macro": 53.21420000000001
-         }
-       }
-     ]
-   },
-   "Feeds-1": {
-     "map": 81.03
-   },
-   "Feeds Title": {
-     "map": 78.85
-   }
- }
- ```
-
- <a name="s2and"></a>
- ### S2AND evaluation
- S2AND evaluation requires the data to be cached locally in a specific format. We provide a helper script to generate the document representations for S2AND before evaluating them.
-
- **Step 1**
-
- Obtain the data from AWS S3:
- ```bash
- mkdir s2and && cd s2and
- aws s3 --no-sign-request sync s3://ai2-s2-research-public/scirepeval/test/s2and .
- ```
- **Step 2**
-
- Generate embeddings for all the paper blocks. The model parameters are the same as for scirepeval.py; provide them to initialize the required model type.
- ```bash
- python s2and_embeddings.py --mtype <model type> -m <model checkpoint> --adapters-dir <adapters dir or chkpt> --data-dir <path to S2AND data> --suffix <suffix for embedding file name>
- ```
- **Step 3**
-
- Run S2AND evaluation.
- Set up S2AND as in the [repo](https://github.com/allenai/S2AND) and change the configuration to point to your data location.
-
- Run the following command:
- ```bash
- python scripts/custom_block_transfer_experiment_seed_paper.py --custom_block_path <data>/blocks --experiment_name mini_customblock_phantasm_v1 --exclude_medline --emb_suffix _<suffix>.pkl
- ```
- ### Filtering Tasks
- #### By Name
- ```python
- from scirepeval import SciRepEval
- from evaluation.encoders import Model
-
- # Base/MTL CLS
- model = Model(variant="default", base_checkpoint="allenai/specter")
-
- # MTL CTRL
- model = Model(variant="default", base_checkpoint="allenai/scirepeval_ctrl", use_ctrl_codes=True)
-
- # PALs
- model = Model(variant="pals", base_checkpoint="allenai/scirepeval_pals", all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"])
-
- # Adapters/Fusion
- adapters_dict = {"[CLF]": "allenai/scirepeval_adapters_clf", "[QRY]": "allenai/scirepeval_adapters_qry", "[RGN]": "allenai/scirepeval_adapters_rgn", "[PRX]": "allenai/scirepeval_adapters_prx"}
- model = Model(variant=<"adapters"|"fusion">, base_checkpoint="malteos/scincl", adapters_load_from=adapters_dict, all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"])
-
- # Choose the task names from scirepeval_tasks.jsonl
- evaluator = SciRepEval(task_list=["Biomimicry", "DRSM", "TREC-CoVID", "Feeds-1"])
- evaluator.evaluate(model, "scirepeval_results.json")
- ```
-
- #### By Task Type
- ```python
- from scirepeval import SciRepEval
- from evaluation.encoders import Model
-
- # Create a model instance as in the previous example
- model = Model(variant="default", base_checkpoint="allenai/specter")
-
- # Choose the task types from (classification, regression, proximity and adhoc_search)
- evaluator = SciRepEval(task_formats=["classification", "regression"])
- evaluator.evaluate(model, "scirepeval_results.json")
- ```
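The sample report above mixes two shapes: classification tasks nest metrics under `complete`/`few_shot`, while other tasks report their metric at the top level. As a minimal sketch (not a script shipped with the repo, and assuming the default output path), the report can be flattened for inspection with the standard library:

```python
import json

# Load the report written by scirepeval.py; adjust the path if --output was used.
with open("scirepeval_results.json") as f:
    report = json.load(f)

for task, res in report.items():
    if "complete" in res:
        # Classification tasks: full-data metrics plus few-shot runs.
        for metric, value in res["complete"].items():
            print(task, "complete", metric, value)
        for shot in res.get("few_shot", []):
            for metric, value in shot["results"].items():
                print(task, f"few_shot@{shot['sample_size']}", metric, value)
    else:
        # Other tasks report metrics at the top level, e.g. {"map": 81.03}.
        for metric, value in res.items():
            print(task, "complete", metric, value)
```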
LICENSE DELETED
@@ -1,21 +0,0 @@
- MIT License
-
- Copyright (c) 2023 Autonomous Vision Group
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
LICENSE.md DELETED
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
README.md CHANGED
@@ -1,98 +1,3 @@
- # SciRepEval: A Multi-Format Benchmark for Scientific Document Representations
- This repo contains the code to train, evaluate and reproduce the representation learning models and results on the benchmark introduced in [SciRepEval](https://api.semanticscholar.org/CorpusID:254018137).
-
- ## Quick Setup
- Clone the repo and set up the environment as follows:
- ```bash
- git clone git@github.com:allenai/scirepeval.git
- cd scirepeval
- conda create -n scirepeval python=3.8
- conda activate scirepeval
- pip install -r requirements.txt
- ```
- ## Usage
- Please refer to the following for further usage:
-
- [Training](https://github.com/allenai/scirepeval/blob/main/training/TRAINING.md) - Train multi-task/multi-format transformer models or adapter modules
-
- [Inference](https://github.com/allenai/scirepeval/blob/main/evaluation/INFERENCE.md) - Use the trained SciRepEval models to generate embeddings
-
- [Evaluation](https://github.com/allenai/scirepeval/blob/main/evaluation/EVALUATION.md) - Evaluate trained models on custom tasks OR customize the existing evaluation config for SciRepEval benchmark tasks
-
- [Benchmarking](https://github.com/allenai/scirepeval/blob/main/BENCHMARKING.md) - Evaluate models (pretrained from HuggingFace or local checkpoints) on SciRepEval and generate a report
-
- ## Benchmark Details
- SciRepEval consists of 25 scientific document tasks to train and evaluate scientific document representation models. The tasks are divided across 4 task formats: classification **CLF**, regression **RGN**, proximity (nearest neighbors) retrieval **PRX** and ad-hoc search **SRCH**. The table below gives a brief overview of the tasks with their HuggingFace datasets config names, if applicable.
- The benchmark dataset can be downloaded from AWS S3 or HuggingFace as follows:
- #### AWS S3 via CLI
- ```bash
- mkdir scirepeval_data && mkdir scirepeval_data/train && mkdir scirepeval_data/test && cd scirepeval_data
- aws s3 --no-sign-request sync s3://ai2-s2-research-public/scirepeval/train train
- aws s3 --no-sign-request sync s3://ai2-s2-research-public/scirepeval/test test
- ```
- The AWS CLI commands can be run with the `--dryrun` flag to list the files being copied. The entire dataset is ~24 GB in size.
-
- #### HuggingFace Datasets
- The training, validation and raw evaluation data is available at [allenai/scirepeval](https://huggingface.co/datasets/allenai/scirepeval), while the labelled test examples are available at [allenai/scirepeval_test](https://huggingface.co/datasets/allenai/scirepeval_test).
-
- ```python
- import datasets
- # training/validation/eval metadata
- dataset = datasets.load_dataset("allenai/scirepeval", "<hf config name>")
-
- # labelled test examples
- dataset = datasets.load_dataset("allenai/scirepeval_test", "<hf config name>")
- ```
-
- Since we want to evaluate document representations, every dataset consists of two parts: test metadata (text for representation generation, available under allenai/scirepeval) and labelled examples (available under allenai/scirepeval_test).
-
- |Format|Name|Train|Metric|HF Config|HF Test Config|
- |--|--|--|--|--|--|
- |CLF|[MeSH Descriptors](https://www.nlm.nih.gov/databases/download/terms_and_conditions_mesh.html)|Y|F1 Macro|[mesh_descriptors](https://huggingface.co/datasets/allenai/scirepeval/viewer/mesh_descriptors)|[mesh_descriptors](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/mesh_descriptors)|
- |CLF|Fields of study|Y|F1 Macro|[fos](https://huggingface.co/datasets/allenai/scirepeval/viewer/fos)|[fos](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/fos)|
- |CLF|[Biomimicry](https://github.com/nasa-petal/PeTaL-db)|N|F1 Binary|[biomimicry](https://huggingface.co/datasets/allenai/scirepeval/viewer/biomimicry)|[biomimicry](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/biomimicry)|
- |CLF|[DRSM](https://github.com/chanzuckerberg/DRSM-corpus)|N|F1 Macro|[drsm](https://huggingface.co/datasets/allenai/scirepeval/viewer/drsm)|[drsm](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/drsm)|
- |CLF|[SciDocs-MAG](https://github.com/allenai/scidocs)|N|F1 Macro|[scidocs_mag_mesh](https://huggingface.co/datasets/allenai/scirepeval/viewer/scidocs_mag_mesh)|[scidocs_mag](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/scidocs_mag)|
- |CLF|[SciDocs-Mesh Diseases](https://github.com/allenai/scidocs)|N|F1 Macro|[scidocs_mag_mesh](https://huggingface.co/datasets/allenai/scirepeval/viewer/scidocs_mesh)|[scidocs_mesh](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/scidocs_mag_mesh)|
- |RGN|Citation Count|Y|Kendall's Tau|[cite_count](https://huggingface.co/datasets/allenai/scirepeval/viewer/cite_count)|[cite_count](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/cite_count)|
- |RGN|Year of Publication|Y|Kendall's Tau|[pub_year](https://huggingface.co/datasets/allenai/scirepeval/viewer/pub_year)|[pub_year](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/pub_year)|
- |RGN|[Peer Review Score](https://api.openreview.net)|N|Kendall's Tau|[peer_review_score_hIndex](https://huggingface.co/datasets/allenai/scirepeval/viewer/peer_review_score_hIndex)|[peer_review_score](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/peer_review_score)|
- |RGN|[Max Author hIndex](https://api.openreview.net)|N|Kendall's Tau|[peer_review_score_hIndex](https://huggingface.co/datasets/allenai/scirepeval/viewer/peer_review_score_hIndex)|[hIndex](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/hIndex)|
- |RGN|[Tweet Mentions](https://github.com/lingo-iitgn/TweetPap)|N|Kendall's Tau|[tweet_mentions](https://huggingface.co/datasets/allenai/scirepeval/viewer/tweet_mentions)|[tweet_mentions](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/tweet_mentions)|
- |PRX|Same Author Detection|Y|MAP|[same_author](https://huggingface.co/datasets/allenai/scirepeval/viewer/same_author)|[same_author](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/same_author)|
- |PRX|Highly Influential Citations|Y|MAP|[high_influence_cite](https://huggingface.co/datasets/allenai/scirepeval/viewer/high_influence_cite)|[high_influence_cite](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/high_influence_cite)|
- |PRX|Citation Prediction|Y|-|[cite_prediction](https://huggingface.co/datasets/allenai/scirepeval/viewer/cite_prediction)|-|
- |PRX|S2AND*|N|B^3 F1|-|-|
- |PRX|Paper-Reviewer Matching**|N|Precision@5,10|[paper_reviewer_matching](https://huggingface.co/datasets/allenai/scirepeval/viewer/paper_reviewer_matching)|[paper_reviewer_matching](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/paper_reviewer_matching), [reviewers](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/reviewers)|
- |PRX|Feeds-1|N|MAP|[feeds_1](https://huggingface.co/datasets/allenai/scirepeval/viewer/feeds_1)|[feeds_1](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/feeds_1)|
- |PRX|Feeds-M|N|MAP|[feeds_m](https://huggingface.co/datasets/allenai/scirepeval/viewer/feeds_m)|[feeds_m](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/feeds_m)|
- |PRX|[SciDocs-Cite](https://github.com/allenai/scidocs)|N|MAP, NDCG|[scidocs_view_cite_read](https://huggingface.co/datasets/allenai/scirepeval/viewer/scidocs_view_cite_read)|[scidocs_cite](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/scidocs_cite)|
- |PRX|[SciDocs-CoCite](https://github.com/allenai/scidocs)|N|MAP, NDCG|[scidocs_view_cite_read](https://huggingface.co/datasets/allenai/scirepeval/viewer/scidocs_view_cite_read)|[scidocs_cocite](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/scidocs_cocite)|
- |PRX|[SciDocs-CoView](https://github.com/allenai/scidocs)|N|MAP, NDCG|[scidocs_view_cite_read](https://huggingface.co/datasets/allenai/scirepeval/viewer/scidocs_view_cite_read)|[scidocs_view](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/scidocs_view)|
- |PRX|[SciDocs-CoRead](https://github.com/allenai/scidocs)|N|MAP, NDCG|[scidocs_view_cite_read](https://huggingface.co/datasets/allenai/scirepeval/viewer/scidocs_view_cite_read)|[scidocs_read](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/scidocs_read)|
- |SRCH|Search|Y|NDCG|[search](https://huggingface.co/datasets/allenai/scirepeval/viewer/search)|[search](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/search)|
- |SRCH|Feeds-Title|N|MAP|[feeds_title](https://huggingface.co/datasets/allenai/scirepeval/viewer/feeds_title)|[feeds_title](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/feeds_title)|
- |SRCH|[TREC-CoVID](https://ir.nist.gov/trec-covid/data.html)|N|NDCG|[trec_covid](https://huggingface.co/datasets/allenai/scirepeval/viewer/trec_covid)|[trec_covid](https://huggingface.co/datasets/allenai/scirepeval_test/viewer/trec_covid)|
-
- *S2AND requires the evaluation dataset in a specific format, so to evaluate your model on this task please follow [these](https://github.com/allenai/scirepeval/blob/main/BENCHMARKING.md#s2and) instructions.
-
- **Combinations of multiple datasets - [1](https://mimno.infosci.cornell.edu/data/nips_reviewer_data.tar.gz), [2](https://web.archive.org/web/20211015210300/http://sifaka.cs.uiuc.edu/ir/data/review.html), [3](https://ieee-dataport.org/open-access/retrorevmatchevalicip16-retrospective-reviewer-matching-dataset-and-evaluation-ieee-icip); a dataset of papers authored by potential reviewers is also required for evaluation, hence the multiple dataset configs.
-
- ## License
- The aggregate benchmark is released under the [ODC-BY](https://opendatacommons.org/licenses/by/1.0/) license. By downloading this data you acknowledge that you have read and agreed to all the terms in this license.
- For the constituent datasets, also go through the individual licensing requirements, as applicable.
-
- ## Citation
-
- Please cite the SciRepEval work as:
-
- ```bibtex
- @article{Singh2022SciRepEvalAM,
-   title={SciRepEval: A Multi-Format Benchmark for Scientific Document Representations},
-   author={Amanpreet Singh and Mike D'Arcy and Arman Cohan and Doug Downey and Sergey Feldman},
-   journal={ArXiv},
-   year={2022},
-   volume={abs/2211.13308}
- }
- ```
-
+ # SuperSciRep: A Multi-Format Benchmark for Full-text Scientific Document Representations
+
+
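The two-part dataset layout described in the deleted README can be exercised directly. This is a minimal sketch (not from the repo) using the `fos` config from the task table; any config name from the table works the same way, though split names can differ per task:

```python
import datasets

# Text for representation generation (paper metadata such as title/abstract).
metadata = datasets.load_dataset("allenai/scirepeval", "fos")
# Gold-labelled test examples used by the evaluator.
labelled = datasets.load_dataset("allenai/scirepeval_test", "fos")

print(metadata)  # inspect available splits and fields
print(labelled)
```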
adapter_fusion.py DELETED
@@ -1,105 +0,0 @@
- from typing import List, Optional, Union, Dict
- from transformers.adapters import AutoAdapterModel
- from transformers.adapters.composition import Fuse
- from abc import ABC, abstractmethod
- import torch
- import os
-
-
- class AdapterFactory:
-     @staticmethod
-     def get_adapter(checkpoint_name: str, task_ids: List[str], fuse_adapters: bool,
-                     adapters_dir: Union[str, Dict] = None):
-         print(task_ids)
-         if not fuse_adapters:
-             return AdapterEncoder(checkpoint_name, task_ids)
-         else:
-             return AdapterFusion(checkpoint_name, task_ids, adapters_dir)
-
-
- class AbstractAdapter(torch.nn.Module, ABC):
-     def __init__(self, checkpoint_name):
-         super(AbstractAdapter, self).__init__()
-         self.model = AutoAdapterModel.from_pretrained(checkpoint_name)  # checkpoint
-
-     @abstractmethod
-     def save_pretrained(self, save_path: str):
-         self.model.save_all_adapters(save_path)
-
-     def resize_token_embeddings(self, new_num_tokens: Optional[int] = None):
-         return self.model.resize_token_embeddings(new_num_tokens)
-
-
- class AdapterEncoder(AbstractAdapter):
-     def __init__(self, checkpoint_name, task_ids: List[str], load_as=None):
-         super(AdapterEncoder, self).__init__(checkpoint_name)
-         # Add a new adapter
-         for t_id in task_ids:
-             if not load_as:
-                 self.model.add_adapter(t_id, config="pfeiffer")
-             else:
-                 # load_as can be a str for a local path, or a dict to be loaded from the adapters hub
-                 if type(load_as) == str:
-                     self.model.load_adapter(f"{load_as}/{t_id}/", load_as=t_id)
-                 else:
-                     self.model.load_adapter(load_as[t_id], load_as=t_id)
-         self.model.train_adapter(adapter_setup=task_ids, train_embeddings=False)
-
-     def forward(self, input_ids, attention_mask, task_id):
-         self.model.base_model.set_active_adapters(task_id)
-         return self.model(input_ids, attention_mask=attention_mask)
-
-     def save_pretrained(self, save_path: str, adapter_names: List[str] = None):
-         # self.model.save_pretrained(save_path)
-         save_path = f'{save_path}/adapters/'
-         os.makedirs(save_path, exist_ok=True)
-         if not adapter_names:
-             self.model.save_all_adapters(save_path)
-         else:
-             for a_name in adapter_names:
-                 self.model.save_adapter(f"{save_path}/{a_name}/", a_name)
-
-
- class AdapterFusion(AbstractAdapter):
-     def __init__(self, checkpoint_name, task_ids: List[str], load_adapters_as: Union[str, dict], fusion_dir: str = None,
-                  inference=False):
-         super(AdapterFusion, self).__init__(checkpoint_name)
-         # Add a new adapter
-         # load_adapters_as can be a str for a local path with adapters and fusion dirs, or a dict to be loaded
-         # from the adapters hub; the adapters-hub version of single adapters should have the suffix _fusion
-         if not fusion_dir:
-             fusion_dir = load_adapters_as.replace("/adapters/", "") if inference and type(
-                 load_adapters_as) == str else None
-         load_adapters_as = load_adapters_as.replace("fusion", "adapters") if type(
-             load_adapters_as) == str else load_adapters_as
-         for t_id in task_ids:
-             if type(load_adapters_as) == str and os.path.isdir(load_adapters_as):
-                 self.model.load_adapter(f"{load_adapters_as}/{t_id}/", load_as=t_id)
-             else:
-                 self.model.load_adapter(load_adapters_as[t_id], load_as=t_id)
-         self.fusion_mods_dict = dict()
-         for i, t_id in enumerate(task_ids):
-             task_fuse = Fuse(*([t_id] + task_ids[:i] + task_ids[i + 1:]))
-             self.fusion_mods_dict[t_id] = task_fuse
-             if not inference:
-                 self.model.add_adapter_fusion(task_fuse)
-             else:
-                 if fusion_dir:
-                     self.model.load_adapter_fusion(f"{fusion_dir}/{t_id}_fusion/")
-                 else:
-                     self.model.load_adapter_fusion(f"{load_adapters_as[t_id]}_fusion")
-         self.model.train_adapter_fusion(list(self.fusion_mods_dict.values()))
-         print(self.model.active_adapters)
-         # self.model.get_input_embeddings().train()
-         # self.model.train_adapter(adapter_setup=task_ids, train_embeddings=True)
-
-     def forward(self, input_ids, attention_mask, task_id):
-         self.model.base_model.set_active_adapters(self.fusion_mods_dict[task_id])
-         return self.model(input_ids, attention_mask=attention_mask)
-
-     def save_pretrained(self, save_path: str):
-         # self.model.save_pretrained(save_path)
-         from pathlib import Path
-         Path(save_path).mkdir(parents=True, exist_ok=True)
-         for t_id, t_fuse in self.fusion_mods_dict.items():
-             self.model.save_adapter_fusion(f'{save_path}/{t_id}_fusion/', t_fuse)
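For orientation, a minimal usage sketch of the `AdapterFactory` defined in the deleted file above. It assumes the old `transformers.adapters` (adapter-transformers) environment the file imports from; the checkpoint name is the one used elsewhere in this repo, but treat the exact calls as illustrative rather than a supported entry point. With `fuse_adapters=False` the factory returns an `AdapterEncoder` that adds fresh, randomly initialized Pfeiffer adapters, i.e. a training-time setup:

```python
import torch
from transformers import AutoTokenizer

task_ids = ["[CLF]", "[QRY]", "[RGN]", "[PRX]"]
# Adds one new pfeiffer adapter per task id on top of the base model.
encoder = AdapterFactory.get_adapter("malteos/scincl", task_ids, fuse_adapters=False)

tokenizer = AutoTokenizer.from_pretrained("malteos/scincl")
batch = tokenizer(["A paper title and abstract"], return_tensors="pt", truncation=True)
with torch.no_grad():
    # forward() activates the adapter named by task_id before running the base model.
    out = encoder(batch["input_ids"], batch["attention_mask"], task_id="[PRX]")
```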
benchmark.sh DELETED
@@ -1,2 +0,0 @@
- python scirepeval.py --tasks-config full_scirepeval_tasks.jsonl -m allenai/longformer-base-4096 --batch-size 1 --output longformer_results.json --document
- python scirepeval.py --tasks-config full_scirepeval_tasks.jsonl -m /home/haoyu/code/academic-budget-LMs/logs/htrans/runs/2023-05-02_23-18-37/huggingface_saved --batch-size 2 --output 32_32_8_6_results.json --htrans --document
bert_pals.py DELETED
@@ -1,861 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """PyTorch BERT model."""
16
-
17
- from __future__ import absolute_import
18
- from __future__ import division
19
- from __future__ import print_function
20
-
21
- import copy
22
- import json
23
- import math
24
- from typing import List, Optional
25
-
26
- import os
27
- import six
28
- import torch
29
- import torch.nn as nn
30
- import torch.nn.functional as F
31
- from torch.nn import CrossEntropyLoss, MSELoss
32
- from torch.nn.parameter import Parameter
33
- from transformers.models.bert.modeling_bert import BertPreTrainedModel
34
- from transformers.models.bert.configuration_bert import BertConfig
35
-
36
-
37
- def gelu(x):
38
- """Implementation of the gelu activation function.
39
- For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
40
- 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
41
- """
42
- return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
43
-
44
-
45
- class BertPalConfig(BertConfig):
46
- """Configuration class to store the configuration of a `BertModel`.
47
- """
48
-
49
- def __init__(self, vocab_size, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
50
- intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
51
- max_position_embeddings=512, type_vocab_size=16, initializer_range=0.02, pals=False, mult=False,
52
- top=False, lhuc=False, houlsby=False, bert_lay_top=False, num_tasks=1, extra_dim=None,
53
- hidden_size_aug=204, **kwargs):
54
- """Constructs BertConfig.
55
-
56
- Args:
57
- vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
58
- hidden_size: Size of the encoder layers and the pooler layer.
59
- num_hidden_layers: Number of hidden layers in the Transformer encoder.
60
- num_attention_heads: Number of attention heads for each attention layer in
61
- the Transformer encoder.
62
- intermediate_size: The size of the "intermediate" (i.e., feed-forward)
63
- layer in the Transformer encoder.
64
- hidden_act: The non-linear activation function (function or string) in the
65
- encoder and pooler.
66
- hidden_dropout_prob: The dropout probabilitiy for all fully connected
67
- layers in the embeddings, encoder, and pooler.
68
- attention_probs_dropout_prob: The dropout ratio for the attention
69
- probabilities.
70
- max_position_embeddings: The maximum sequence length that this model might
71
- ever be used with. Typically set this to something large just in case
72
- (e.g., 512 or 1024 or 2048).
73
- type_vocab_size: The vocabulary size of the `token_type_ids` passed into
74
- `BertModel`.
75
- initializer_range: The sttdev of the truncated_normal_initializer for
76
- initializing all weight matrices.
77
- """
78
- super().__init__(vocab_size, hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
79
- hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, type_vocab_size,
80
- initializer_range, **kwargs)
81
- self.vocab_size = vocab_size
82
- self.hidden_size = hidden_size
83
- self.num_hidden_layers = num_hidden_layers
84
- self.num_attention_heads = num_attention_heads
85
- self.hidden_act = hidden_act
86
- self.intermediate_size = intermediate_size
87
- self.hidden_dropout_prob = hidden_dropout_prob
88
- self.attention_probs_dropout_prob = attention_probs_dropout_prob
89
- self.max_position_embeddings = max_position_embeddings
90
- self.type_vocab_size = type_vocab_size
91
- self.initializer_range = initializer_range
92
- self.hidden_size_aug = hidden_size_aug
93
- self.pals = pals
94
- self.extra_dim = extra_dim
95
- self.houlsby = houlsby
96
- self.mult = mult
97
- self.top = top
98
- self.bert_lay_top = bert_lay_top
99
- self.lhuc = lhuc
100
- self.num_tasks = num_tasks
101
-
102
- @classmethod
103
- def from_json_file(cls, json_file):
104
- """Constructs a `BertConfig` from a json file of parameters."""
105
- with open(json_file, "r") as reader:
106
- text = reader.read()
107
- return cls.from_dict(json.loads(text))
108
-
109
- def to_dict(self):
110
- """Serializes this instance to a Python dictionary."""
111
- output = copy.deepcopy(self.__dict__)
112
- return output
113
-
114
- @classmethod
115
- def from_dict(cls, json_object):
116
- """Constructs a `BertConfig` from a Python dictionary of parameters."""
117
- config = BertPalConfig(vocab_size=None)
118
- for (key, value) in six.iteritems(json_object):
119
- config.__dict__[key] = value
120
- return config
121
-
122
- def to_json_string(self, use_diff: bool = True):
123
- """Serializes this instance to a JSON string."""
124
- return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
125
-
126
-
127


class BERTLayerNorm(nn.Module):
    def __init__(self, config, multi_params=None, variance_epsilon=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the square root)."""
        super(BERTLayerNorm, self).__init__()
        if multi_params is not None:
            self.weight = nn.Parameter(torch.ones(config.hidden_size_aug))
            self.bias = nn.Parameter(torch.zeros(config.hidden_size_aug))
        else:
            self.weight = nn.Parameter(torch.ones(config.hidden_size))
            self.bias = nn.Parameter(torch.zeros(config.hidden_size))
        self.variance_epsilon = variance_epsilon

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias


class BERTEmbeddings(nn.Module):
    def __init__(self, config):
        """Construct the embedding module from word, position and token_type embeddings."""
        super(BERTEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BERTLayerNorm(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class BERTSelfAttention(nn.Module):
    def __init__(self, config, multi_params=None):
        super(BERTSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        if multi_params is not None:
            self.num_attention_heads = multi_params
            self.attention_head_size = int(config.hidden_size_aug / self.num_attention_heads)
            self.all_head_size = self.num_attention_heads * self.attention_head_size
            hidden_size = config.hidden_size_aug
        else:
            self.num_attention_heads = config.num_attention_heads
            self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
            self.all_head_size = self.num_attention_heads * self.attention_head_size
            hidden_size = config.hidden_size

        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function).
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer


class BERTMultSelfOutput(nn.Module):
    def __init__(self, config, multi_params=None):
        super(BERTMultSelfOutput, self).__init__()
        self.LayerNorm = BERTLayerNorm(config, multi_params)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BERTSelfOutput(nn.Module):
    def __init__(self, config, multi_params=None, houlsby=False):
        super(BERTSelfOutput, self).__init__()
        if houlsby:
            multi = BERTLowRank(config)
            self.multi_layers = nn.ModuleList([copy.deepcopy(multi) for _ in range(config.num_tasks)])
        if multi_params is not None:
            self.dense = nn.Linear(config.hidden_size_aug, config.hidden_size_aug)
        else:
            self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BERTLayerNorm(config, multi_params)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.houlsby = houlsby

    def forward(self, hidden_states, input_tensor, attention_mask=None, i=0):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        if self.houlsby:
            hidden_states = hidden_states + self.multi_layers[i](hidden_states, attention_mask)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BERTAttention(nn.Module):
    def __init__(self, config, multi_params=None, houlsby=False):
        super(BERTAttention, self).__init__()
        self.self = BERTSelfAttention(config, multi_params)
        self.output = BERTSelfOutput(config, multi_params, houlsby)

    def forward(self, input_tensor, attention_mask, i=0):
        self_output = self.self(input_tensor, attention_mask)
        attention_output = self.output(self_output, input_tensor, attention_mask, i=i)
        return attention_output


class BERTPals(nn.Module):
    def __init__(self, config, extra_dim=None):
        super(BERTPals, self).__init__()
        # Encoder and decoder matrices project down to the smaller dimension.
        self.aug_dense = nn.Linear(config.hidden_size, config.hidden_size_aug)
        self.aug_dense2 = nn.Linear(config.hidden_size_aug, config.hidden_size)
        # Attention without the final matrix multiply.
        self.attn = BERTSelfAttention(config, 6)
        self.config = config
        self.hidden_act_fn = gelu

    def forward(self, hidden_states, attention_mask=None):
        hidden_states_aug = self.aug_dense(hidden_states)
        hidden_states_aug = self.attn(hidden_states_aug, attention_mask)
        hidden_states = self.aug_dense2(hidden_states_aug)
        hidden_states = self.hidden_act_fn(hidden_states)
        return hidden_states


class BERTLowRank(nn.Module):
    def __init__(self, config, extra_dim=None):
        super(BERTLowRank, self).__init__()
        # Encoder and decoder matrices project down to the smaller dimension.
        if config.extra_dim:
            self.aug_dense = nn.Linear(config.hidden_size, config.extra_dim)
            self.aug_dense2 = nn.Linear(config.extra_dim, config.hidden_size)
        else:
            self.aug_dense = nn.Linear(config.hidden_size, config.hidden_size_aug)
            self.aug_dense2 = nn.Linear(config.hidden_size_aug, config.hidden_size)
        self.config = config
        self.hidden_act_fn = gelu

    def forward(self, hidden_states, attention_mask=None):
        hidden_states_aug = self.aug_dense(hidden_states)
        hidden_states_aug = self.hidden_act_fn(hidden_states_aug)
        hidden_states = self.aug_dense2(hidden_states_aug)
        return hidden_states


class BERTIntermediate(nn.Module):
    def __init__(self, config):
        super(BERTIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.config = config
        self.intermediate_act_fn = gelu

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BERTLhuc(nn.Module):
    def __init__(self, config):
        super(BERTLhuc, self).__init__()
        self.lhuc = Parameter(torch.zeros(config.hidden_size))

    def forward(self, hidden_states):
        # LHUC scaling: a learned per-dimension gate in (0, 2).
        hidden_states = hidden_states * 2. * torch.sigmoid(self.lhuc)
        return hidden_states


class BERTOutput(nn.Module):
    def __init__(self, config, houlsby=False):
        super(BERTOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BERTLayerNorm(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if houlsby:
            if config.pals:
                multi = BERTPals(config)
            else:
                multi = BERTLowRank(config)
            self.multi_layers = nn.ModuleList([copy.deepcopy(multi) for _ in range(config.num_tasks)])
        self.houlsby = houlsby

    def forward(self, hidden_states, input_tensor, attention_mask=None, i=0):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        if self.houlsby:
            hidden_states = hidden_states + self.multi_layers[i](input_tensor, attention_mask)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BERTLayer(nn.Module):
    def __init__(self, config, mult=False, houlsby=False):
        super(BERTLayer, self).__init__()
        self.attention = BERTAttention(config, houlsby=houlsby)
        self.intermediate = BERTIntermediate(config)
        self.output = BERTOutput(config, houlsby=houlsby)
        if config.lhuc:
            lhuc = BERTLhuc(config)
            self.multi_lhuc = nn.ModuleList([copy.deepcopy(lhuc) for _ in range(config.num_tasks)])
        if mult:
            if config.pals:
                multi = BERTPals(config)
            else:
                multi = BERTLowRank(config)
            self.multi_layers = nn.ModuleList([copy.deepcopy(multi) for _ in range(config.num_tasks)])
        self.mult = mult
        self.lhuc = config.lhuc
        self.houlsby = houlsby

    def forward(self, hidden_states, attention_mask, i=0):
        attention_output = self.attention(hidden_states, attention_mask, i)
        intermediate_output = self.intermediate(attention_output)
        if self.lhuc and not self.mult:
            layer_output = self.output(intermediate_output, attention_output)
            layer_output = self.multi_lhuc[i](layer_output)
        elif self.mult:
            extra = self.multi_layers[i](hidden_states, attention_mask)
            if self.lhuc:
                extra = self.multi_lhuc[i](extra)
            layer_output = self.output(intermediate_output, attention_output + extra)
        elif self.houlsby:
            layer_output = self.output(intermediate_output, attention_output, attention_mask, i)
        else:
            layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class BERTEncoder(nn.Module):
    def __init__(self, config):
        super(BERTEncoder, self).__init__()
        self.config = config
        if config.houlsby:
            # Adjust line below to add PALs etc. to different layers. True means add a PAL.
            self.multis = [True if i < 999 else False for i in range(config.num_hidden_layers)]
            self.layer = nn.ModuleList([BERTLayer(config, houlsby=mult) for mult in self.multis])
        elif config.mult:
            # Adjust line below to add PALs etc. to different layers. True means add a PAL.
            self.multis = [True if i < 999 else False for i in range(config.num_hidden_layers)]
            self.layer = nn.ModuleList([BERTLayer(config, mult=mult) for mult in self.multis])
        else:
            layer = BERTLayer(config)
            self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])

        if config.top:
            if config.bert_lay_top:
                multi = BERTLayer(config)
            else:
                # Projection matrices and attention for adding to the top.
                mult_dense = nn.Linear(config.hidden_size, config.hidden_size_aug)
                self.mult_dense = nn.ModuleList([copy.deepcopy(mult_dense) for _ in range(config.num_tasks)])
                mult_dense2 = nn.Linear(config.hidden_size_aug, config.hidden_size)
                self.mult_dense2 = nn.ModuleList([copy.deepcopy(mult_dense2) for _ in range(config.num_tasks)])
                multi = nn.ModuleList([copy.deepcopy(BERTAttention(config, 12)) for _ in range(6)])

            self.multi_layers = nn.ModuleList([copy.deepcopy(multi) for _ in range(config.num_tasks)])
            self.gelu = gelu

        if config.mult and config.pals:
            dense = nn.Linear(config.hidden_size, config.hidden_size_aug)
            # Shared encoder and decoder across layers.
            self.mult_aug_dense = nn.ModuleList([copy.deepcopy(dense) for _ in range(config.num_tasks)])
            dense2 = nn.Linear(config.hidden_size_aug, config.hidden_size)
            self.mult_aug_dense2 = nn.ModuleList([copy.deepcopy(dense2) for _ in range(config.num_tasks)])
            for l, layer in enumerate(self.layer):
                if self.multis[l]:
                    for i, lay in enumerate(layer.multi_layers):
                        lay.aug_dense = self.mult_aug_dense[i]
                        lay.aug_dense2 = self.mult_aug_dense2[i]
        if config.houlsby and config.pals:
            dense = nn.Linear(config.hidden_size, config.hidden_size_aug)
            # Shared encoder and decoder across layers.
            self.mult_aug_dense = nn.ModuleList([copy.deepcopy(dense) for _ in range(config.num_tasks)])
            dense2 = nn.Linear(config.hidden_size_aug, config.hidden_size)
            self.mult_aug_dense2 = nn.ModuleList([copy.deepcopy(dense2) for _ in range(config.num_tasks)])
            dense3 = nn.Linear(config.hidden_size, config.hidden_size_aug)
            for l, layer in enumerate(self.layer):
                if self.multis[l]:
                    for i, lay in enumerate(layer.output.multi_layers):
                        lay.aug_dense = self.mult_aug_dense[i]
                        lay.aug_dense2 = self.mult_aug_dense2[i]

    def forward(self, hidden_states, attention_mask, i=0):
        all_encoder_layers = []
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states, attention_mask, i)
            all_encoder_layers.append(hidden_states)
        if self.config.top:
            if self.config.bert_lay_top:
                all_encoder_layers[-1] = self.multi_layers[i](hidden_states, attention_mask)
            else:
                hidden_states = self.mult_dense[i](hidden_states)
                for lay in self.multi_layers[i]:
                    hidden_states = lay(hidden_states, attention_mask)
                all_encoder_layers[-1] = self.mult_dense2[i](hidden_states)
        return all_encoder_layers


class BERTPooler(nn.Module):
    def __init__(self, config):
        super(BERTPooler, self).__init__()

        dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()
        self.pool = False
        if self.pool:
            self.mult_dense_layers = nn.ModuleList([copy.deepcopy(dense) for _ in range(config.num_tasks)])
        else:
            self.dense = dense
        self.mult = config.mult
        self.top = config.top

    def forward(self, hidden_states, i=0):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        if (self.mult or self.top) and self.pool:
            pooled_output = self.mult_dense_layers[i](first_token_tensor)
        else:
            pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertModel(BertPreTrainedModel):
    """BERT model ("Bidirectional Encoder Representations from Transformers").

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
                                 num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)

    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config: BertPalConfig):
        """Constructor for BertModel.

        Args:
            config: `BertConfig` instance.
        """
        super(BertModel, self).__init__(config)
        self.embeddings = BERTEmbeddings(config)
        self.encoder = BERTEncoder(config)
        self.pooler = BERTPooler(config)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, i=0):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, from_seq_length]
        # so we can broadcast to [batch_size, num_heads, to_seq_length, from_seq_length].
        # This attention mask is simpler than the triangular masking of causal attention
        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.float()
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output = self.embeddings(input_ids, token_type_ids)
        all_encoder_layers = self.encoder(embedding_output, extended_attention_mask, i)
        sequence_output = all_encoder_layers[-1]
        pooled_output = self.pooler(sequence_output, i)
        return all_encoder_layers, pooled_output

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value


class BertForMultiTask(nn.Module):
    """BERT model for classification or regression on GLUE tasks (STS-B is treated as a regression task).
    This module is composed of the BERT model with one linear layer per task on top of
    the pooled output.
    """

    def __init__(self, config, tasks):
        super(BertForMultiTask, self).__init__()
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.ModuleList([nn.Linear(config.hidden_size, num_labels)
                                         for i, num_labels in enumerate(tasks)])

        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            elif isinstance(module, BERTLayerNorm):
                # BERTLayerNorm stores its parameters as `weight`/`bias`; standard LayerNorm init.
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
            if isinstance(module, nn.Linear):
                if module.bias is not None:
                    module.bias.data.zero_()

        self.apply(init_weights)

    def forward(self, input_ids, token_type_ids, attention_mask, task_id, name='cola', labels=None):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, task_id)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier[task_id](pooled_output)

        if labels is not None and name != 'sts':
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits, labels)
            return loss, logits
        # STS-B is a regression task.
        elif labels is not None and name == 'sts':
            loss_fct = MSELoss()
            loss = loss_fct(logits, labels.unsqueeze(1))
            return loss, logits
        else:
            return logits


class BertForSequenceClassification(nn.Module):
    """BERT model for classification.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size=32000, hidden_size=512,
                        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)

    num_labels = 2

    model = BertForSequenceClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config, num_labels):
        super(BertForSequenceClassification, self).__init__()
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)

        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            elif isinstance(module, BERTLayerNorm):
                # BERTLayerNorm stores its parameters as `weight`/`bias`; standard LayerNorm init.
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
            if isinstance(module, nn.Linear):
                if module.bias is not None:
                    module.bias.data.zero_()

        self.apply(init_weights)

    def forward(self, input_ids, token_type_ids, attention_mask, labels=None):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits, labels)
            return loss, logits
        else:
            return logits


class BertForQuestionAnswering(nn.Module):
    """BERT model for Question Answering (span extraction).
    This module is composed of the BERT model with a linear layer on top of
    the sequence output that computes start_logits and end_logits.

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size=32000, hidden_size=512,
                        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)

    model = BertForQuestionAnswering(config)
    start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__()
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            elif isinstance(module, BERTLayerNorm):
                # BERTLayerNorm stores its parameters as `weight`/`bias`; standard LayerNorm init.
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
            if isinstance(module, nn.Linear):
                module.bias.data.zero_()

        self.apply(init_weights)

    def forward(self, input_ids, token_type_ids, attention_mask, start_positions=None, end_positions=None):
        all_encoder_layers, _ = self.bert(input_ids, token_type_ids, attention_mask)
        sequence_output = all_encoder_layers[-1]
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the positions may carry an extra dimension; if not, this is a no-op.
            start_positions = start_positions.squeeze(-1)
            end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions are outside our model inputs; we ignore these terms.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            return total_loss
        else:
            return start_logits, end_logits


class BertForMultipleChoice(nn.Module):
    """BERT model for multiple choice tasks.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.
    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_choices`: the number of classes for the classifier. Default = 2.
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the token type indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
            and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].
    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_labels].
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
    input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]], [[1, 1, 0], [1, 0, 0]]])
    token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]], [[0, 1, 1], [0, 0, 1]]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    num_choices = 2
    model = BertForMultipleChoice(config, num_choices)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config, num_choices=2):
        super(BertForMultipleChoice, self).__init__()
        self.num_choices = num_choices
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)

        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            elif isinstance(module, BERTLayerNorm):
                # BERTLayerNorm stores its parameters as `weight`/`bias`; standard LayerNorm init.
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
            if isinstance(module, nn.Linear):
                module.bias.data.zero_()

        self.apply(init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, self.num_choices)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            return loss
        else:
            return reshaped_logits


class BertPalsEncoder(torch.nn.Module):
    def __init__(self, config: str, task_ids: List[str], checkpoint):
        super(BertPalsEncoder, self).__init__()
        self.bert_config = BertPalConfig.from_json_file(config) if type(config) == str else config
        self.bert_config.num_tasks = len(task_ids)
        if type(checkpoint) != str:
            self.bert_config.vocab_size = checkpoint.config.vocab_size
        self.bert = BertModel(self.bert_config) if type(config) == str else checkpoint
        self.task_idx = {task: i for i, task in enumerate(task_ids)}
        print(self.task_idx)

        def init_weights(module):
            if isinstance(module, (torch.nn.Linear, torch.nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range)
            elif isinstance(module, BERTLayerNorm):
                module.bias.data.normal_(mean=0.0, std=self.bert_config.initializer_range)
                module.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range)
            if isinstance(module, torch.nn.Linear):
                if module.bias is not None:
                    module.bias.data.zero_()

        if type(config) == str:
            if type(checkpoint) == str:
                chk = torch.load(checkpoint, map_location='cpu')
                update = {k.replace("bert.", ""): v for k, v in chk.items()}
        else:
            self.apply(init_weights)
            partial = checkpoint.state_dict()
            model_dict = self.bert.state_dict()
            update = {}
            for n, p in model_dict.items():
                if 'aug' in n or 'mult' in n:
                    update[n] = p
                    if 'pooler.mult' in n and 'bias' in n:
                        update[n] = partial['pooler.dense.bias']
                    if 'pooler.mult' in n and 'weight' in n:
                        update[n] = partial['pooler.dense.weight']
                else:
                    update[n] = partial[n]
        self.bert.load_state_dict(update)

    def forward(self, input_ids, attention_mask=None, task_id=None):
        embedding = self.bert(input_ids, attention_mask=attention_mask, i=self.task_idx[task_id])
        # BertModel returns (all_encoder_layers, pooled_output); return the last
        # layer's token-level hidden states.
        return embedding[0][-1]

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None):
        return self.bert.resize_token_embeddings(new_num_tokens)

    def save_pretrained(self, save_path: str):
        os.makedirs(save_path, exist_ok=True)
        torch.save(self.bert.state_dict(), f'{save_path}/pytorch_model.bin')
        # save_pretrained writes the config to disk itself.
        self.bert.config.save_pretrained(save_path)
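
A minimal usage sketch for `BertPalsEncoder`, assuming the PALs config shipped in this repo under `training/bert_pals_config/pals.config.json`, a hypothetical local checkpoint path, and the four SciRepEval task ids:

```python
import torch
from bert_pals import BertPalsEncoder

# Sketch only: the checkpoint path is hypothetical and must match the PALs state dict.
encoder = BertPalsEncoder(config="training/bert_pals_config/pals.config.json",
                          task_ids=["[CLF]", "[QRY]", "[RGN]", "[PRX]"],
                          checkpoint="pals_checkpoint/pytorch_model.bin")
input_ids = torch.tensor([[101, 7592, 2088, 102]])  # toy WordPiece ids
with torch.no_grad():
    hidden = encoder(input_ids, attention_mask=torch.ones_like(input_ids), task_id="[CLF]")
print(hidden.shape)  # (batch, seq_len, hidden_size): last encoder layer
```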
evaluation/EVALUATION.md DELETED
@@ -1,131 +0,0 @@
## Evaluation

- SciRepEval can be used to evaluate scientific document representations on 4 task types - classification, regression, proximity-based retrieval (a document is the query) and ad-hoc search (a raw text query).
- The evaluation process for each task consists of 2 steps - generating representations with a model and the raw metadata, and evaluating these representations as features of the labelled test examples using a suitable metric.

To reproduce the results in the paper for all or a collection of tasks in SciRepEval, follow the steps in [BENCHMARKING.md](https://github.com/allenai/scirepeval/blob/main/BENCHMARKING.md).


### Custom Evaluation
#### SciRepEval config
The evaluation setup for the existing tasks in SciRepEval can be configured in [scirepeval_tasks.jsonl](https://github.com/allenai/scirepeval/blob/main/scirepeval_tasks.jsonl).
These config parameters are internally parsed by the evaluators to generate the document representations and compute the relevant metric.

**Example task config**:
```json
{
    "name": "Biomimicry",
    "type": "classification",
    "data": {
        "meta": {
            "name": "allenai/scirepeval",
            "config": "biomimicry"
        },
        "test": {
            "name": "allenai/scirepeval_test",
            "config": "biomimicry"
        }
    },
    "metrics": ["f1"],
    "few_shot": [
        {"sample_size": 64, "iterations": 50},
        {"sample_size": 16, "iterations": 100}
    ]
}
```
**Notes**

1. `"name"` - identifier for the task; can be used when filtering the tasks for evaluation.
2. `"type"` - can be one of `{"classification", "regression", "proximity", "adhoc_search"}`; for multi-label classification, additionally set the `"multi_label": true` flag.
3. `"data"` is required and expects at least two entries: `"meta"` for the raw test data with titles and abstracts for representation generation, and `"test"` for the labelled examples. These can be local file paths or HuggingFace datasets.
4. `"metrics"` is a list of the metrics to be computed for the task. These can be customized based on task type as follows:
```python
if "type" == "classification":
    metrics can be {"f1", "accuracy", "precision", "recall", "{f1|precision|recall}_{macro|micro}"}
elif "type" == "regression":
    metrics can be {"mse", "r2", "pearsonr", "kendalltau"}
else:
    metrics can be anything allowed in pytrec_eval*
```
*[pytrec_eval](https://github.com/cvangysel/pytrec_eval)

5. Classification tasks can additionally be evaluated in few-shot mode; provide a list of `"sample_size"` and `"iterations"` entries.
6. To avoid generating embeddings in every run, they can be cached and re-loaded in future runs (see the sketch after this list) by providing the `"embeddings"` config as

```json
"embeddings": {"save": "<embeddings_dir>/<embeddings_file>.jsonl"}
```

OR

```json
"embeddings": {"load": "<embeddings_dir>/<embeddings_file>.jsonl"}
```
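
As referenced in note 6 above, a sketch of appending a task entry with embedding caching to the config file (the cache path is hypothetical; the rest mirrors the Biomimicry example):

```python
import json

task = {
    "name": "Biomimicry",
    "type": "classification",
    "data": {"meta": {"name": "allenai/scirepeval", "config": "biomimicry"},
             "test": {"name": "allenai/scirepeval_test", "config": "biomimicry"}},
    "metrics": ["f1"],
    "embeddings": {"save": "embeddings/biomimicry.jsonl"}  # hypothetical path
}
with open("scirepeval_tasks.jsonl", "a") as f:
    f.write(json.dumps(task) + "\n")
```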

#### Custom Tasks
For evaluating new tasks from any of the four task types in SciRepEval, create the task config json as above and either append it to **scirepeval_tasks.jsonl** or add it to a new config file.

To evaluate on all tasks:
Select the model parameters as described [here](https://github.com/allenai/scirepeval/blob/main/BENCHMARKING.md#models), e.g.
```bash
python scirepeval.py -m allenai/scirepeval_ctrl --ctrl-tokens --tasks-config scirepeval_tasks.jsonl --output scirepeval_results.json
```
OR

```python
from scirepeval import SciRepEval
from evaluation.encoders import Model

# Base/MTL CLS
model = Model(variant="default", base_checkpoint="allenai/specter")

# MTL CTRL
model = Model(variant="default", base_checkpoint="allenai/scirepeval_ctrl", use_ctrl_codes=True)

# PALs
model = Model(variant="pals", base_checkpoint="allenai/scirepeval_pals", all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"])

# Adapters
adapters_dict = {"[CLF]": "allenai/scirepeval_adapters_clf", "[QRY]": "allenai/scirepeval_adapters_qry", "[RGN]": "allenai/scirepeval_adapters_rgn", "[PRX]": "allenai/scirepeval_adapters_prx"}
model = Model(variant="adapters", base_checkpoint="malteos/scincl", adapters_load_from=adapters_dict, all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"])

# Fusion
model = Model(variant="fusion", base_checkpoint="malteos/scincl", adapters_load_from=adapters_dict, fusion_load_from="<fusion checkpoint directory>", all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"])

# Choose the task names from scirepeval_tasks.jsonl; task_list and task_format are optional filters
evaluator = SciRepEval(tasks_config="scirepeval_tasks.jsonl", task_list=[...], task_format=[...])
evaluator.evaluate(model, "scirepeval_results.json")
```
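
To restrict evaluation to a subset of the configured tasks, the optional filters can be passed explicitly; a sketch, with filter values drawn from the example config above:

```python
evaluator = SciRepEval(tasks_config="scirepeval_tasks.jsonl",
                       task_list=["Biomimicry"],          # filter on the "name" field
                       task_format=["classification"])    # or filter on the "type" field
evaluator.evaluate(model, "scirepeval_results.json")
```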

#### Mean Pool Ensemble

To generate and evaluate the element-wise mean of embeddings from multiple models, provide a list of models to the `evaluate` method.
```python
from scirepeval import SciRepEval
from evaluation.encoders import Model

# MTL CTRL
model1 = Model(variant="default", base_checkpoint="malteos/scincl", use_ctrl_codes=True)

# Adapters
adapters_dict = {"[CLF]": "allenai/scirepeval_adapters_clf", "[QRY]": "allenai/scirepeval_adapters_qry", "[RGN]": "allenai/scirepeval_adapters_rgn", "[PRX]": "allenai/scirepeval_adapters_prx"}
model2 = Model(variant="adapters", base_checkpoint="malteos/scincl", adapters_load_from=adapters_dict, all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"])

models = [model1, model2]
evaluator = SciRepEval(tasks_config="scirepeval_tasks_adapters.jsonl", batch_size=16)
evaluator.evaluate(models, "scirepeval_results.json")
```
evaluation/INFERENCE.md DELETED
@@ -1,98 +0,0 @@
## Inference

This guide provides the steps to generate relevant document embeddings with SciRepEval models.

### Step 1 Create a Model instance
```python
from evaluation.encoders import Model

# Base/MTL CLS
model = Model(variant="default", base_checkpoint="allenai/specter")

# MTL CTRL
model = Model(variant="default", base_checkpoint="allenai/scirepeval_ctrl", use_ctrl_codes=True)

# PALs
model = Model(variant="pals", base_checkpoint="allenai/scirepeval_pals", all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"])

# Adapters
adapters_dict = {"[CLF]": "allenai/scirepeval_adapters_clf", "[QRY]": "allenai/scirepeval_adapters_qry", "[RGN]": "allenai/scirepeval_adapters_rgn", "[PRX]": "allenai/scirepeval_adapters_prx"}
model = Model(variant="adapters", base_checkpoint="malteos/scincl", adapters_load_from=adapters_dict, all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"])

# Fusion
model = Model(variant="fusion", base_checkpoint="malteos/scincl", adapters_load_from=adapters_dict, fusion_load_from="<fusion checkpoint directory>", all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"])
```

### Step 2 Determine task type
Choose the relevant task id value from the below python dict keyed on task type:
``TASK_IDS = {"classification": "[CLF]", "regression": "[RGN]", "proximity": "[PRX]", "adhoc_search": {"query": "[QRY]", "candidates": "[PRX]"}}``

```python
model.task_id = "[CLF]"  # OR "[RGN]"/"[PRX]"
# For ad-hoc search, supply both members of the pair:
model.task_id = {"query": "[QRY]", "candidates": "[PRX]"}
```

For feeding raw text input to the model, follow Step 3. If working with a specific dataset, jump to Step 4.

### Step 3 Generate embeddings for raw text
Use the model object as a callable.
```python
embeddings = model("Attention is all you need[SEP]Attention is all you need")
```
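
The callable also accepts a list of strings; a sketch, assuming the model and task id were set as in Steps 1-2 (the returned tensor's second dimension is the encoder's hidden size, 768 by default):

```python
batch = ["Attention is all you need[SEP]Attention is all you need",
         "A second title[SEP]A second abstract"]
embeddings = model(batch)  # torch tensor of shape (2, 768)
```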

### Step 4 Generate embeddings for a dataset

- If data instances consist of records with fields, e.g.
```json
{
    "corpus_id": 22715986,
    "title": "Accuracy of MRI for treatment response assessment after taxane- and anthracycline-based neoadjuvant chemotherapy in HER2-negative breast cancer.",
    "abstract": "BACKGROUND\nStudies suggest that MRI is an accurate means for assessing tumor size after neoadjuvant chemotherapy (NAC). However, accuracy might be dependent on the receptor status of tumors. MRI accuracy for response assessment after homogenous NAC in a relative large group of patients with stage II/III HER2-negative breast cancer has not been reported before.\n\n\nMETHODS\n250 patients from 26 hospitals received NAC (docetaxel, adriamycin and cyclophosphamide) in the context of the NEOZOTAC trial. MRI was done after 3 cycles and post-NAC. Imaging (RECIST 1.1) and pathological (Miller and Payne) responses were recorded."
}
```
```python
from evaluation.eval_datasets import SimpleDataset
from evaluation.evaluator import Evaluator

dataset = ("allenai/scirepeval", "biomimicry")  # OR a path like "scirepeval/biomimicry/test.json"
evaluator = Evaluator("biomimicry", dataset, SimpleDataset, model, batch_size=32, fields=["title", "abstract"], key="paper_id")
embeddings = evaluator.generate_embeddings(save_path="embeddings.json")
```
- If data instances consist of query-candidate pairs, e.g.
```json
{
    "dataset": "aminer",
    "query": {
        "corpus_id": 24254880,
        "title": "[Characteristics of heavy metal elements and their relationship with magnetic properties of river sediment from urban area in Lanzhou].",
        "abstract": "The contents of As, Co, Cr, Cu, Ni, Pb, V and Zn in the surface sediments from 8 rivers in urban area in Lanzhou were monitored by ecological risk which was assessed by the potential ecological Håkanson index, and the index of geoaccumulation (Igeo), sediment enrichment factor (R), and environmental magnetism. The results showed that: (1) the potential ecological risk of heavy metals of As, Co, Ni, V in surface sediments from 8 rivers were low, which belonged to low ecological risk. But the risk of heave metals Cr, Pb, Zn in surface sediments from Yuer river was high, which belonged to middle ecological risk, and in downstream of Yuer river, the element of Cu belonged to high ecological risk. (2) The rivers in Lanzhou could be divided into four groups according to the heavy mental pollution degree: first type, such as Paihong river, Shier river, Yuer river and Shuimo river, called downstream concentrate type; second type, such as Qili river, called upstream concentrate type; third type, such as Luoguo river and Dasha river, called less affected type; fourth type, Lanni river, which polluted heavily in up and downstream; (3) The correlation analysis between magnetic parameters and element contents show that the parameters which mainly reflect the concentration of the magnetic minerals (X, SIRM, Ms) have close association with Cr, Ni, Pb, Zn, Cu, So we can infer that the magnetic minerals in deposits samples mainly came from electroplating effluent, motor vehicle emission, and domestic sewage. SIRM/X shows a strong correlation with Cr, Ni, Pb, Zn, indicating the distribution of anthropogenic particulates. (4) The magnetic minerals(X, SIRM, Ms) have a strong correlation with the geoaccumulation (Igeo) than potential ecological risk index and enrichment factor (R). These results suggest a possible approach for source identification of magnetic material in pollution studies and the validity of using magnetic measurements to mapping the polluted area."
    },
    "candidates": [
        {
            "corpus_id": 12540419,
            "title": "Combination of magnetic parameters and heavy metals to discriminate soil-contamination sources in Yinchuan--a typical oasis city of Northwestern China.",
            "abstract": "Various industrial processes and vehicular traffic result in harmful emissions containing both magnetic minerals and heavy metals. In this study, we investigated the levels of magnetic and heavy metal contamination of topsoils from Yinchuan city in northwestern China. The results demonstrate that magnetic mineral assemblages in the topsoil are dominated by pseudo-single domain (PSD) and multi-domain (MD) magnetite. The concentrations of anthropogenic heavy metals (Cr, Cu, Pb and Zn) and the magnetic properties of χlf, SIRM, χARM, and 'SOFT' and 'HARD' remanence are significantly correlated, suggesting that the magnetic minerals and heavy metals have common sources. Combined use of principal components and fuzzy cluster analysis of the magnetic and chemical data set indicates that the magnetic and geochemical properties of the particulates emitted from different sources vary significantly. Samples from university campus and residential areas are mainly affected by crustal material, with low concentrations of magnetic minerals and heavy metals, while industrial pollution sources are characterized by high concentrations of coarse magnetite and Cr, Cu, Pb and Zn. Traffic pollution is characterized by Pb and Zn, and magnetite. Magnetic measurements of soils are capable of differentiating sources of magnetic minerals and heavy metals from industrial processes, vehicle fleets and soil parent material.",
            "score": 1
        }
    ]...
}
```

```python
from evaluation.eval_datasets import IRDataset
from evaluation.evaluator import Evaluator

dataset = ("allenai/scirepeval", "feeds_1")  # OR a path like "scirepeval/feeds_1/test.json"
evaluator = Evaluator("feeds_1", dataset, IRDataset, model, batch_size=32, fields=["title", "abstract"], key="doc_id")
embeddings = evaluator.generate_embeddings(save_path="embeddings.json")
```
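
Embeddings saved via `save_path` can be reloaded later without re-running the encoder, using the loader from `evaluation/embeddings_generator.py`:

```python
from evaluation.embeddings_generator import EmbeddingsGenerator

embeddings = EmbeddingsGenerator.load_embeddings_from_jsonl("embeddings.json")
# embeddings: Dict[str, np.ndarray] keyed on doc_id
```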
evaluation/embeddings_generator.py DELETED
@@ -1,53 +0,0 @@
from typing import Dict, List, Union

from evaluation.encoders import Model
from tqdm import tqdm
import numpy as np
import json
import pathlib
import logging

logger = logging.getLogger(__name__)


class EmbeddingsGenerator:
    def __init__(self, datasets, models: Union[Model, List[Model]]):
        self.datasets = datasets
        self.models = models

    def generate_embeddings(self, save_path: str = None, htrans=False, document=False) -> Dict[str, np.ndarray]:
        results = dict()
        try:
            # Datasets and models are paired 1:1; embeddings for the same doc id
            # are summed across models here and averaged below.
            for dataset, model in zip(self.datasets, self.models):
                for batch, batch_ids in tqdm(dataset.batches(htrans, document), total=len(dataset) // dataset.batch_size):
                    emb = model(batch, batch_ids)
                    for paper_id, embedding in zip(batch_ids, emb.unbind()):
                        if type(paper_id) == tuple:
                            paper_id = paper_id[0]
                        if paper_id not in results:
                            results[paper_id] = embedding.detach().cpu().numpy()
                        else:
                            results[paper_id] += embedding.detach().cpu().numpy()
                    del batch
                    del emb
            results = {k: v / len(self.models) for k, v in results.items()}
        except Exception as e:
            logger.exception(e)
        finally:
            if save_path:
                pathlib.Path(save_path).parent.mkdir(parents=True, exist_ok=True)
                with open(save_path, 'w') as fout:
                    for k, v in results.items():
                        fout.write(json.dumps({"doc_id": k, "embedding": v.tolist()}) + '\n')
            logger.info(f"Generated {len(results)} embeddings")
            return results

    @staticmethod
    def load_embeddings_from_jsonl(embeddings_path: str) -> Dict[str, np.ndarray]:
        embeddings = {}
        with open(embeddings_path, 'r') as f:
            for line in tqdm(f, desc=f'reading embeddings from {embeddings_path}'):
                line_json = json.loads(line)
                embeddings[line_json['doc_id']] = np.array(line_json['embedding'], dtype=np.float16)
        logger.info(f"Loaded {len(embeddings)} embeddings")
        return embeddings
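
A usage sketch for the generator above, assuming `model1`/`model2` are `evaluation.encoders.Model` instances and `dataset` is a compatible dataset object (datasets and models are zipped pairwise, and per-document embeddings are averaged over the models; the save path is hypothetical):

```python
gen = EmbeddingsGenerator(datasets=[dataset, dataset], models=[model1, model2])
embeddings = gen.generate_embeddings(save_path="embeddings/mean_pool.jsonl")
```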
evaluation/encoders.py DELETED
@@ -1,320 +0,0 @@
from typing import Dict, Union, List
import numpy as np
from transformers import AutoModel, AutoTokenizer
import os
from bert_pals import BertPalsEncoder, BertPalConfig, BertModel
from adapter_fusion import AdapterEncoder, AdapterFusion
from htrans.model import HTransModel, HTransConfig
from nltk.tokenize import sent_tokenize
import torch
import logging
from collections import OrderedDict, abc
from itertools import chain
from fvcore.nn import FlopCountAnalysis

logger = logging.getLogger(__name__)


class EncoderFactory:
    def __init__(self, base_checkpoint: str = None, adapters_load_from: Union[str, Dict] = None,
                 fusion_load_from: str = None, all_tasks: list = None):
        self.base_checkpoint = f"{base_checkpoint}/model" if os.path.isdir(base_checkpoint) else base_checkpoint
        self.all_tasks = all_tasks
        self.adapters_load_from = f"{adapters_load_from}/model/adapters" if (type(
            adapters_load_from) == str and os.path.isdir(
            adapters_load_from)) else adapters_load_from
        # Only derive the fusion path when a fusion checkpoint directory is given.
        self.fusion_load_from = f"{fusion_load_from}/model" if fusion_load_from else None

    def get_encoder(self, variant: str):
        if variant == "default":
            return AutoModel.from_pretrained(self.base_checkpoint)
        elif variant == "pals":
            # needs all task names and a local checkpoint path
            if os.path.isdir(self.base_checkpoint):
                return BertPalsEncoder(config=f"{self.base_checkpoint}/config.json", task_ids=self.all_tasks,
                                       checkpoint=f"{self.base_checkpoint}/pytorch_model.bin")
            else:
                pals_config = BertPalConfig.from_pretrained(self.base_checkpoint)
                pals_model = BertModel.from_pretrained(self.base_checkpoint)
                return BertPalsEncoder(config=pals_config, task_ids=self.all_tasks,
                                       checkpoint=pals_model)
        elif variant == "adapters":
            # needs a base model checkpoint and the adapters to be loaded from a local path or a dict of
            # (task_id, adapter) entries from the adapters hub
            return AdapterEncoder(self.base_checkpoint, self.all_tasks, load_as=self.adapters_load_from)
        elif variant == "fusion":
            # needs a base model and a list of adapters/local adapter checkpoint paths to be fused
            return AdapterFusion(self.base_checkpoint, self.all_tasks, load_adapters_as=self.adapters_load_from,
                                 fusion_dir=self.fusion_load_from, inference=True)
        else:
            raise ValueError("Unknown encoder type: {}".format(variant))


class Model:
    def __init__(self, variant: str = "default", base_checkpoint: str = None,
                 adapters_load_from: Union[str, Dict] = None, fusion_load_from: str = None,
                 use_ctrl_codes: bool = False, task_id: Union[str, Dict] = None,
                 all_tasks: list = None, hidden_dim: int = 768, max_len: int = 512, use_fp16=False, document=False):
        self.variant = variant
        self.encoder = EncoderFactory(base_checkpoint, adapters_load_from, fusion_load_from, all_tasks).get_encoder(
            variant)
        if torch.cuda.is_available():
            self.encoder.to('cuda')
        self.encoder.eval()
        tokenizer_checkpoint = f"{base_checkpoint}/tokenizer" if os.path.isdir(base_checkpoint) else base_checkpoint
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)
        self.use_ctrl_codes = use_ctrl_codes
        # With control codes the code token occupies position 0, so the document
        # representation is read from position 1 instead.
        self.reqd_token_idx = 0 if not use_ctrl_codes else 1
        self._task_id = task_id
        self.document = document
        if self._task_id:
            if use_ctrl_codes:
                logger.info(f"Control code used: {self._task_id}")
            elif variant != "default":
                logger.info(f"Task id used: {self._task_id}")

        self.hidden_dim = hidden_dim
        self.max_length = max_len
        self.use_fp16 = use_fp16

    @property
    def task_id(self):
        return self._task_id

    @task_id.setter
    def task_id(self, value):
        if self.use_ctrl_codes:
            logger.info(f"Control code used: {value}")
        elif self.variant != "default":
            logger.info(f"Task id used: {value}")
        self._task_id = value

    def __call__(self, batch, batch_ids=None):
        def append_ctrl_code(batch, batch_ids):
            if type(self._task_id) == dict:
                batch = [f"{self.task_id['query']} {text}" if bid[1] == "q" else f"{self.task_id['candidates']} {text}"
                         for text, bid in zip(batch, batch_ids)]
            else:
                batch = [f"{self.task_id} {text}" for text in batch]
            return batch

        batch = [batch] if type(batch) == str else batch
        batch_ids = [] if not batch_ids else batch_ids
        if self.use_ctrl_codes:
            batch = append_ctrl_code(batch, batch_ids)
        if self.document:
            batch = ["".join([list(chain.from_iterable(i))[0]] + [" [SEP] "] + list(chain.from_iterable(i))[1:]) for i in batch]
        input_ids = self.tokenizer(batch, padding=True, truncation=True,
                                   return_tensors="pt", return_token_type_ids=False, max_length=self.max_length)
        # Move the inputs to GPU only when one is available.
        if torch.cuda.is_available():
            input_ids = input_ids.to('cuda')
        if self.variant == "default":
            output = self.encoder(**input_ids)
        elif type(self._task_id) != dict:
            output = self.encoder(task_id=self._task_id, **input_ids)
        else:
            x = input_ids["input_ids"]
            output = torch.zeros(x.shape[0], x.shape[1], self.hidden_dim, device=x.device)
            q_idx = torch.tensor([i for i, b in enumerate(batch_ids) if b[1] == "q"])
            c_idx = torch.tensor([i for i, b in enumerate(batch_ids) if b[1] == "c"])

            if not q_idx.shape[0]:
                output = self.encoder(task_id=self._task_id["candidates"], **input_ids)
            elif not c_idx.shape[0]:
                output = self.encoder(task_id=self._task_id["query"], **input_ids)
            else:
                for i, v in enumerate(sorted(self._task_id.values())):
                    curr_input_idx = q_idx if v == "[QRY]" else c_idx
                    curr_input = x[curr_input_idx]
                    curr_output = self.encoder(task_id=v, input_ids=curr_input,
                                               attention_mask=input_ids["attention_mask"][curr_input_idx])
                    try:
                        output[curr_input_idx] = curr_output  # adapters
                    except (TypeError, RuntimeError):
                        output[curr_input_idx] = curr_output.last_hidden_state  # pals
        try:
            embedding = output.last_hidden_state[:, self.reqd_token_idx, :]  # cls token
        except AttributeError:
            embedding = output[:, self.reqd_token_idx, :]  # cls token
        return embedding.half() if self.use_fp16 else embedding


class HModel:
    def __init__(self, variant: str = "default", base_checkpoint: str = None,
                 adapters_load_from: Union[str, Dict] = None, fusion_load_from: str = None,
                 use_ctrl_codes: bool = False, task_id: Union[str, Dict] = None,
                 all_tasks: list = None, use_fp16=False):
        self.variant = variant
        # self.encoder = EncoderFactory(base_checkpoint, adapters_load_from, fusion_load_from, all_tasks).get_encoder(
        #     variant)
        self.config = HTransConfig.from_pretrained(base_checkpoint)
        self.encoder = HTransModel.from_pretrained(base_checkpoint, config=self.config)
        if torch.cuda.is_available():
            self.encoder.to('cuda')
        self.encoder.eval()
        # tokenizer_checkpoint = f"{base_checkpoint}/tokenizer" if os.path.isdir(base_checkpoint) else base_checkpoint
        self.tokenizer = AutoTokenizer.from_pretrained(base_checkpoint)
        if self.config.max_doc_length > 1:
            self.head_ids = torch.tensor([self.tokenizer.get_vocab()["<sec>"], self.tokenizer.get_vocab()["<doc>"]], dtype=torch.int)
        self.use_ctrl_codes = use_ctrl_codes
        self.reqd_token_idx = 0 if not use_ctrl_codes else 1
        self._task_id = task_id
        if self._task_id:
            if use_ctrl_codes:
                logger.info(f"Control code used: {self._task_id}")
            elif variant != "default":
                logger.info(f"Task id used: {self._task_id}")

        self.use_fp16 = use_fp16

    @property
    def task_id(self):
        return self._task_id

    @task_id.setter
    def task_id(self, value):
        if self.use_ctrl_codes:
            logger.info(f"Control code used: {value}")
        elif self.variant != "default":
            logger.info(f"Task id used: {value}")
        self._task_id = value

    def __call__(self, batch, batch_ids=None):
        def append_ctrl_code(batch, batch_ids):
            if type(self._task_id) == dict:
                batch = [f"{self.task_id['query']} {text}" if bid[1] == "q" else f"{self.task_id['candidates']} {text}"
                         for text, bid in zip(batch, batch_ids)]
            else:
                batch = [f"{self.task_id} {text}" for text in batch]
            return batch

        batch = [batch] if type(batch) == str else batch
        batch_ids = [] if not batch_ids else batch_ids
        if self.use_ctrl_codes:
            batch = append_ctrl_code(batch, batch_ids)
        inputs = []
        pad_input_ids = np.ones((1, self.config.max_sent_length), dtype=np.int64) * self.tokenizer.pad_token_id
        pad_attention_mask = np.zeros((1, self.config.max_sent_length), dtype=np.int64)
        # pad_token_type_ids = np.zeros((1, self.config.max_sent_length), dtype=np.int64)

        def tokenize_document(tokenizer, document, max_sent_length, max_sec_length, max_doc_length=1):
            if max_doc_length != 1:
                document = document[:max_doc_length]
            document = [i[:max_sec_length] for i in document]
            text = list(chain.from_iterable(document))
            sec_length = [0] + [len(i) for i in document]
            inputs = tokenizer(text, return_special_tokens_mask=False, return_tensors="np",
                               padding="max_length", truncation=True)
            pad_input_ids = np.ones((1, max_sent_length), dtype=np.int64) * tokenizer.pad_token_id
            pad_attention_mask = np.zeros((1, max_sent_length), dtype=np.int64)
            pad_token_type_ids = np.zeros((1, max_sent_length), dtype=np.int64)
209
- sec_inputs = [{"input_ids": np.column_stack(
210
- [np.expand_dims(
211
- np.concatenate(inputs["input_ids"][sum(sec_length[:i]): sum(sec_length[:i]) + sec_length[i + 1]],
212
- axis=0), axis=0)] + [pad_input_ids] * (
213
- max_sec_length - sec_length[i + 1])),
214
- "attention_mask": np.column_stack(
215
- [np.expand_dims(np.concatenate(
216
- inputs["attention_mask"][sum(sec_length[:i]): sum(sec_length[:i]) + sec_length[i + 1]], axis=0),
217
- axis=0)] + [
218
- pad_attention_mask] * (max_sec_length - sec_length[i + 1]))} for i in
219
- range(len(sec_length) - 1)]
220
-
221
- if max_doc_length > 1:
222
- pad_sec_input_ids = np.ones((1, max_sent_length * max_sec_length),
223
- dtype=np.int64) * tokenizer.pad_token_id
224
- pad_sec_attention_mask = np.zeros((1, max_sent_length * max_sec_length), dtype=np.int64)
225
- pad_sec_token_type_ids = np.zeros((1, max_sent_length * max_sec_length), dtype=np.int64)
226
- return {"input_ids": np.column_stack(
227
- [i["input_ids"] for i in sec_inputs] + [pad_sec_input_ids] * (max_doc_length - len(sec_inputs))),
228
- "attention_mask": np.column_stack(
229
- [i["attention_mask"] for i in sec_inputs] + [pad_sec_attention_mask] * (
230
- max_doc_length - len(sec_inputs))),
231
- "sec_mask": np.column_stack([np.ones((1, inputs["input_ids"].shape[0]), dtype=np.int64)] + (
232
- max_doc_length * max_sec_length - inputs["input_ids"].shape[0]) * [
233
- np.zeros((1, 1), dtype=np.int64)]),
234
- "doc_mask": np.column_stack(
235
- [np.ones((1, len(sec_inputs)), dtype=np.int64)] + (max_doc_length - len(sec_inputs)) * [
236
- np.zeros((1, 1), dtype=np.int64)]),
237
- "head_ids": np.array([[self.tokenizer.get_vocab()["<sec>"], self.tokenizer.get_vocab()["<doc>"]]],
238
- dtype=np.int64)
239
- }
240
- return dict(zip(sec_inputs[0].keys(),
241
- [np.concatenate([d[key] for d in sec_inputs]) for key in sec_inputs[0].keys()]))
242
- if self.config.max_doc_length > 1:
243
- for sample in batch:
244
- if type(sample) == str:
245
- sample = [[sample]]
246
- inputs.append(tokenize_document(self.tokenizer,sample, self.config.max_sent_length, self.config.max_sec_length, self.config.max_doc_length))
247
- input_ids = dict(zip(inputs[0].keys(), [torch.tensor(np.concatenate([d[key] for d in inputs])) for key in inputs[0].keys()]))
248
-
249
- else:
250
- for sample in batch:
251
- if type(sample) == str:
252
- sample = [sample]
253
- else:
254
- sample = list(chain.from_iterable(sample))
255
- sentences = sample[:self.config.max_sec_length]
256
- tokenized_sample = self.tokenizer(sentences, padding="max_length", truncation=True,
257
- return_tensors="np", return_token_type_ids=False)
258
- inputs.append({"input_ids": np.row_stack([tokenized_sample["input_ids"]] + [pad_input_ids] * (self.config.max_sec_length - len(sentences))).reshape((1, self.config.max_sent_length*self.config.max_sec_length)),
259
- "attention_mask": np.row_stack([tokenized_sample["attention_mask"]] + [pad_attention_mask] * (self.config.max_sec_length - len(sentences))).reshape((1, self.config.max_sent_length*self.config.max_sec_length)),
260
- "sec_mask": np.column_stack(
261
- [np.ones((1, tokenized_sample["input_ids"].shape[0]), dtype=np.int64)] + (
262
- self.config.max_sec_length - tokenized_sample["input_ids"].shape[0]) * [
263
- np.zeros((1, 1), dtype=np.int64)]),
264
- "head_ids": np.array(
265
- [[self.tokenizer.get_vocab()["<sec>"], self.tokenizer.get_vocab()["<doc>"]]],
266
- dtype=np.int64)
267
- })
268
- input_ids = dict(zip(inputs[0].keys(), [torch.tensor(np.concatenate([d[key] for d in inputs])) for key in inputs[0].keys()]))
269
- input_ids = move_to_device(input_ids, "cuda")
270
-
271
- if self.variant == "default":
272
- output = self.encoder(**input_ids)
273
- elif type(self._task_id) != dict:
274
- output = self.encoder(task_id=self._task_id, **input_ids)
275
- else:
276
- x = input_ids["input_ids"]
277
- output = torch.zeros(x.shape[0], x.shape[1], self.config.hidden_size).to("cuda")
278
- q_idx = torch.tensor([i for i, b in enumerate(batch_ids) if b[1] == "q"])
279
- c_idx = torch.tensor([i for i, b in enumerate(batch_ids) if b[1] == "c"])
280
-
281
- if not q_idx.shape[0]:
282
- output = self.encoder(task_id=self._task_id["candidates"], **input_ids)
283
- elif not c_idx.shape[0]:
284
- output = self.encoder(task_id=self._task_id["query"], **input_ids)
285
- else:
286
- for i, v in enumerate(sorted(self._task_id.values())):
287
- curr_input_idx = q_idx if v == "[QRY]" else c_idx
288
- curr_input = x[curr_input_idx]
289
- curr_output = self.encoder(task_id=v, input_ids=curr_input,
290
- attention_mask=input_ids["attention_mask"][curr_input_idx])
291
- try:
292
- output[curr_input_idx] = curr_output # adapters
293
- except:
294
- output[curr_input_idx] = curr_output.last_hidden_state # pals
295
- try:
296
- if self.config.pool_scheme == "first-token":
297
- # embedding = output.last_hidden_state[:, self.reqd_token_idx, :] # cls token
298
- embedding = output.last_hidden_state[:, [i * self.config.max_sent_length + self.reqd_token_idx for i in
299
- range(self.config.max_sec_length)], :].mean(dim=1) # cls token
300
- elif self.config.pool_scheme == "avg":
301
- # embedding = output.last_hidden_state.mean(dim=1)
302
- embedding = output.last_hidden_state[:, [i*self.config.max_sent_length+self.reqd_token_idx for i in range(self.config.max_sec_length)], :].mean(dim=1) # cls token
303
- # embedding = output.last_hidden_state[:, self.reqd_token_idx, :] # cls token
304
- elif self.config.pool_scheme == "max":
305
- embedding = output.last_hidden_state.max(dim=1)[0]
306
- except:
307
- embedding = output[:, self.reqd_token_idx, :] # cls token
308
- return embedding.half() if self.use_fp16 else embedding
309
-
310
-
311
- def move_to_device(batch, device):
312
- r"""Puts each data field to the device"""
313
- if isinstance(batch, torch.Tensor):
314
- return batch.to(device)
315
- elif isinstance(batch,(list,tuple)):
316
- return tuple(move_to_device(item,device) for item in batch)
317
- elif isinstance(batch, abc.Mapping):
318
- return {key: move_to_device(value,device) for key, value in batch.items()}
319
- else:
320
- return batch
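
For orientation, a minimal usage sketch of the `Model` wrapper removed above; the checkpoint and inputs here are illustrative, not from this commit, and note that `__call__` moves the tokenized batch to `cuda`, so a GPU is required:

    from evaluation.encoders import Model

    # Hypothetical inputs: any encoder checkpoint with a compatible tokenizer.
    model = Model(base_checkpoint="allenai/specter")
    batch = ["Attention Is All You Need", "Deep Residual Learning for Image Recognition"]
    embeddings = model(batch)  # one CLS-token vector per input, shape (2, hidden_dim)
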
evaluation/eval_datasets.py DELETED
@@ -1,96 +0,0 @@
- import logging
- import os
- from typing import Union, List
- from nltk import sent_tokenize
- import datasets
-
- logger = logging.getLogger(__name__)
-
-
- class SimpleDataset:
-
-     def __init__(self, data_path: Union[str, tuple], sep_token: str, batch_size=32,
-                  fields: List = None, key: str = None, processing_fn=None):
-         self.batch_size = batch_size
-         self.sep_token = sep_token
-         if not fields:
-             fields = ["title", "abstract"]
-         self.fields = fields
-         logger.info(f"Loading test metadata from {data_path}")
-         if not processing_fn:
-             if type(data_path) == str and os.path.isfile(data_path):
-                 self.data = datasets.load_dataset("json", data_files={"test": data_path})["test"]
-             else:
-                 self.data = datasets.load_dataset(data_path[0], data_path[1], split="evaluation")
-         else:
-             self.data = processing_fn(data_path)
-         logger.info(f"Loaded {len(self.data)} documents")
-         self.seen_ids = set()
-         self.key = key
-
-     def __len__(self):
-         return len(self.data)
-
-     def batches(self, htrans=False, document=False):
-         return self.process_batches(self.data, htrans=htrans, document=document)
-
-     def process_batches(self, data: Union[datasets.Dataset, List], htrans=False, document=False):
-         # create batches
-         batch = []
-         batch_ids = []
-         batch_size = self.batch_size
-         i = 0
-         key = "doc_id" if not self.key else self.key
-         for d in data:
-             if key in d and d[key] not in self.seen_ids:
-                 bid = d[key]
-                 self.seen_ids.add(bid)
-                 if htrans:
-                     text = [[d["title"]] + sent_tokenize(d["abstract"])]
-                     text += [[i["title"]] + i["sentences"] for i in d["full_text"]]
-                 else:
-                     text = []
-                     for field in self.fields:
-                         if d.get(field):
-                             text.append(str(d[field]))
-                     text = (f" {self.sep_token} ".join(text)).strip()
-                     if document:
-                         for sec in d.get("full_text", []):
-                             text += (sec["title"] + " ")
-                             text += "".join(sec["sentences"])
-                 if (i) % batch_size != 0 or i == 0:
-                     batch_ids.append(bid)
-                     batch.append(text)
-                 else:
-                     yield batch, batch_ids
-                     batch_ids = [bid]
-                     batch = [text]
-                 i += 1
-         if len(batch) > 0:
-             yield batch, batch_ids
-
-
- class IRDataset(SimpleDataset):
-     def __init__(self, data_path, sep_token, batch_size=32, fields=None, key=None, processing_fn=None):
-         super().__init__(data_path, sep_token, batch_size, fields, key, processing_fn)
-         self.queries, self.candidates = [], []
-         self.search = False
-         for d in self.data:
-             if type(d["query"]) == str:
-                 self.search = True
-                 self.queries.append({"title": d["query"], "doc_id": d["doc_id"]})
-             else:
-                 self.queries.append(d["query"])
-             self.candidates += (d["candidates"])
-
-     def __len__(self):
-         return len(self.queries) + len(self.candidates)
-
-     def batches(self, htrans=False, document=False):
-         query_gen = self.process_batches(self.queries, htrans=htrans and self.search, document=document and self.search)
-         cand_gen = self.process_batches(self.candidates, htrans=htrans, document=document)
-         for q, q_ids in query_gen:
-             q_ids = [(v, "q") for v in q_ids]
-             yield q, q_ids
-         for c, c_ids in cand_gen:
-             c_ids = [(v, "c") for v in c_ids]
-             yield c, c_ids
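
A short sketch of how `SimpleDataset` is typically consumed; the metadata path and field names are placeholders:

    from evaluation.eval_datasets import SimpleDataset

    dataset = SimpleDataset("meta.jsonl", sep_token="[SEP]", batch_size=32,
                            fields=["title", "abstract"], key="doc_id")
    for texts, ids in dataset.batches():
        # each entry in `texts` is "title [SEP] abstract"; `ids` holds the matching doc_ids
        ...
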
evaluation/evaluator.py DELETED
@@ -1,228 +0,0 @@
- from typing import Union, Dict, Tuple
-
- import numpy as np
- from lightning.classification import LinearSVC
- from lightning.regression import LinearSVR
- from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score, mean_squared_error, r2_score
- from scipy.stats import kendalltau, pearsonr
- from sklearn.model_selection import GridSearchCV
- from sklearn.multiclass import OneVsRestClassifier
-
- from evaluation.embeddings_generator import EmbeddingsGenerator
- from abc import ABC, abstractmethod
- from evaluation.encoders import Model
- from evaluation.eval_datasets import SimpleDataset, IRDataset
- import logging
- import datasets
- import os
- from enum import Enum
- from sklearn.metrics.pairwise import euclidean_distances
- import pytrec_eval
-
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
- RANDOM_STATE = 42
-
-
- class Evaluator:
-     def __init__(self, name: str, meta_dataset: Union[str, tuple], dataset_class, model: Model, batch_size: int,
-                  fields: list, key: str = None, process_fn=None):
-         if model:
-             if type(model) != list:
-                 model = [model]
-             # for m in model:
-             #     if not m.tokenizer.pad_token:
-             #         m.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
-             #         m.tokenizer.padding_side = "left"
-             #         m.tokenizer.sep_token = m.tokenizer.eos_token
-             #         m.encoder.resize_token_embeddings(len(m.tokenizer))
-             datasets = [dataset_class(meta_dataset, m.tokenizer.sep_token, batch_size, fields, key,
-                                       process_fn) for m in model]
-             self.embeddings_generator = EmbeddingsGenerator(datasets, model)
-         self.name = name
-
-     def generate_embeddings(self, save_path: str = None, htrans=False, document=False):
-         logger.info("Generating embeddings... this might take a while")
-         return self.embeddings_generator.generate_embeddings(save_path, htrans, document)
-
-     @abstractmethod
-     def evaluate(self, embeddings: Union[str, Dict[str, np.ndarray]], **kwargs) -> Dict[str, float]:
-         pass
-
-     @abstractmethod
-     def calc_metrics(self, test, preds) -> Dict[str, float]:
-         pass
-
-     def print_results(self, results: Dict[str, float]):
-         if results:
-             print("*****************************************************")
-             print(f" {self.name}")
-             print("*****************************************************")
-             for k, v in results.items():
-                 print(f" {k}: {v}")
-             print("*****************************************************")
-
-
- class SupervisedTask(Enum):
-     CLASSIFICATION = 1
-     MULTILABEL_CLASSIFICATION = 2
-     REGRESSION = 3
-
-
- SUPERVISED_TASK_METRICS = {
-     SupervisedTask.CLASSIFICATION: {"f1": f1_score, "accuracy": accuracy_score, "precision": precision_score,
-                                     "recall": recall_score},
-     SupervisedTask.REGRESSION: {"mse": mean_squared_error, "r2": r2_score, "pearsonr": pearsonr,
-                                 "kendalltau": kendalltau}
- }
-
-
- class SupervisedEvaluator(Evaluator):
-     def __init__(self, name: str, task: SupervisedTask, meta_dataset: Union[str, tuple],
-                  test_dataset: Union[str, tuple],
-                  model: Model, metrics: tuple, batch_size: int = 16, fields: list = None):
-         super(SupervisedEvaluator, self).__init__(name, meta_dataset, SimpleDataset, model, batch_size, fields)
-         self.test_dataset = test_dataset
-         self.metrics = metrics
-         self.task = task
-
-     def evaluate(self, embeddings, **kwargs):
-         logger.info(f"Loading labelled data from {self.test_dataset}")
-         if type(self.test_dataset) == str and os.path.isdir(self.test_dataset):
-             split_dataset = datasets.load_dataset("csv", data_files={"train": f"{self.test_dataset}/train.csv",
-                                                                      "test": f"{self.test_dataset}/test.csv"})
-         else:
-             split_dataset = datasets.load_dataset(self.test_dataset[0], self.test_dataset[1])
-         logger.info(f"Loaded {len(split_dataset['train'])} training and {len(split_dataset['test'])} test documents")
-         if type(embeddings) == str and os.path.isfile(embeddings):
-             embeddings = EmbeddingsGenerator.load_embeddings_from_jsonl(embeddings)
-         x_train, x_test, y_train, y_test = self.read_dataset(split_dataset, embeddings)
-         eval_fn = self.regression if self.task == SupervisedTask.REGRESSION else self.classify
-         preds = eval_fn(x_train, x_test, y_train)
-         results = self.calc_metrics(y_test, preds)
-         self.print_results(results)
-         return results
-
-     @staticmethod
-     def read_dataset(data: datasets.DatasetDict, embeddings: Dict[str, np.ndarray]) -> Tuple[
-             np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
-         train, test = data["train"], data["test"]
-         x_train, x_test = np.array(
-             [embeddings[str(paper["paper_id"])] for paper in train if str(paper["paper_id"]) in embeddings]), np.array(
-             [embeddings[str(paper["paper_id"])] for paper in test if str(paper["paper_id"]) in embeddings])
-         y_train, y_test = np.array(
-             [paper["label"] for paper in train if str(paper["paper_id"]) in embeddings]), np.array(
-             [paper["label"] for paper in test if str(paper["paper_id"]) in embeddings])
-         return x_train, x_test, y_train, y_test
-
-     def classify(self, x_train: np.ndarray, x_test: np.ndarray, y_train: np.ndarray, cv: int = 3,
-                  n_jobs: int = 5):
-         Cs = np.logspace(-2, 2, 5)
-         if self.task == SupervisedTask.MULTILABEL_CLASSIFICATION:
-             estimator = LinearSVC(max_iter=10000)
-             svm = GridSearchCV(estimator=estimator, cv=cv, param_grid={'C': Cs}, n_jobs=10)
-             svm = OneVsRestClassifier(svm, n_jobs=1)
-         else:
-             estimator = LinearSVC(loss="squared_hinge", random_state=RANDOM_STATE)
-             if cv:
-                 svm = GridSearchCV(estimator=estimator, cv=cv, param_grid={'C': Cs}, verbose=1, n_jobs=n_jobs)
-             else:
-                 svm = estimator
-         svm.fit(x_train, y_train)
-         preds = svm.predict(x_test)
-         return preds
-
-     def regression(self, x_train: np.ndarray, x_test: np.ndarray, y_train: np.ndarray, cv: int = 3,
-                    n_jobs: int = 5):
-         svm = LinearSVR(random_state=RANDOM_STATE)
-         Cs = np.logspace(-4, 2, 7)
-         svm = GridSearchCV(estimator=svm, cv=cv, param_grid={'C': Cs}, verbose=1, n_jobs=n_jobs)
-         svm.fit(x_train, y_train)
-         preds = svm.predict(x_test)
-         return preds
-
-     def calc_metrics(self, test, preds):
-         results = dict()
-         if self.task == SupervisedTask.REGRESSION:
-             for m in self.metrics:
-                 if m in SUPERVISED_TASK_METRICS[self.task]:
-                     result = tuple(SUPERVISED_TASK_METRICS[self.task][m](test, preds))[0]
-                     if m != "mse":
-                         result = np.round(100 * result, 2)
-                     results[m] = result
-                 else:
-                     logger.warning(
-                         f"Metric {m} not found...skipping, try one of {SUPERVISED_TASK_METRICS[self.task].keys()}")
-         else:
-             metric_task = SupervisedTask.CLASSIFICATION
-             for m in self.metrics:
-                 split_m = m.split("_")
-                 if split_m[0] in SUPERVISED_TASK_METRICS[metric_task]:
-                     if len(split_m) > 1:
-                         result = SUPERVISED_TASK_METRICS[metric_task][split_m[0]](test, preds, average=split_m[1])
-                     else:
-                         result = SUPERVISED_TASK_METRICS[metric_task][split_m[0]](test, preds)
-                     results[m] = np.round(100 * result, 2)
-                 else:
-                     logger.warning(
-                         f"Metric {m} not found...skipping, try one of {SUPERVISED_TASK_METRICS[metric_task].keys()}")
-         return results
-
-
- class IREvaluator(Evaluator):
-     def __init__(self, name: str, meta_dataset: Union[str, tuple], test_dataset: Union[str, tuple], model: Model,
-                  metrics: tuple, dataset_class=IRDataset, batch_size: int = 16, fields: list = None, key=None):
-         super(IREvaluator, self).__init__(name, meta_dataset, dataset_class, model, batch_size, fields, key)
-         self.test_dataset = test_dataset
-         self.metrics = metrics
-
-     def get_qc_pairs(self, dataset):
-         pairs = dict()
-         for row in dataset:
-             if row["query_id"] not in pairs:
-                 pairs[row["query_id"]] = dict()
-             pairs[row["query_id"]][row["cand_id"]] = row["score"]
-         return pairs
-
-     def calc_metrics(self, qrels, run):
-         evaluator = pytrec_eval.RelevanceEvaluator(qrels, set(self.metrics))
-         results = evaluator.evaluate(run)
-
-         metric_values = {}
-         for measure in sorted(self.metrics):
-             res = pytrec_eval.compute_aggregated_measure(
-                 measure,
-                 [query_measures[measure] for query_measures in results.values()]
-             )
-             metric_values[measure] = np.round(100 * res, 2)
-         return metric_values
-
-     def evaluate(self, embeddings, **kwargs):
-         logger.info(f"Loading labelled data from {self.test_dataset}")
-         if type(self.test_dataset) == str and os.path.isdir(self.test_dataset):
-             split_dataset = datasets.load_dataset("json", data_files={"test": f"{self.test_dataset}/test_qrel.jsonl"})
-         else:
-             split_dataset = datasets.load_dataset(self.test_dataset[0], self.test_dataset[1])
-         logger.info(f"Loaded {len(split_dataset['test'])} test query-candidate pairs")
-         if type(embeddings) == str and os.path.isfile(embeddings):
-             embeddings = EmbeddingsGenerator.load_embeddings_from_jsonl(embeddings)
-
-         qrels = self.get_qc_pairs(split_dataset["test"])
-         preds = self.retrieval(embeddings, qrels)
-         results = self.calc_metrics(qrels, preds)
-         self.print_results(results)
-         return results
-
-     def retrieval(self, embeddings, qrels: Dict[str, Dict[str, int]]) -> Dict[str, Dict[str, float]]:
-         run = dict()
-         for qid in qrels:
-             if qid in embeddings:
-                 query = np.array([embeddings[qid]])
-                 cids = [cid for cid in qrels[qid] if cid in embeddings]
-                 cands = np.array([embeddings[cid] for cid in qrels[qid] if cid in embeddings])
-                 scores = euclidean_distances(cands, query).flatten()
-                 run[qid] = dict()
-                 for i, cid in enumerate(cids):
-                     run[qid][cid] = float(-scores[i])
-         return run
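
One detail worth noting in `retrieval` above: pytrec_eval ranks candidates by descending score, so Euclidean distances are negated to make the closest embedding rank first. A self-contained check of that convention:

    import numpy as np
    from sklearn.metrics.pairwise import euclidean_distances

    query = np.array([[0.0, 0.0]])
    cands = np.array([[1.0, 0.0], [3.0, 4.0]])
    scores = -euclidean_distances(cands, query).flatten()
    print(scores)  # [-1. -5.]: the nearer candidate gets the higher score
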
evaluation/few_shot_evaluator.py DELETED
@@ -1,58 +0,0 @@
- import math
- from typing import Union
-
- import numpy as np
- from sklearn.model_selection import StratifiedKFold
-
- import evaluation.evaluator
- from evaluation.encoders import Model
- from evaluation.evaluator import SupervisedEvaluator, SupervisedTask
- from tqdm import tqdm
-
-
- class FewShotEvaluator(SupervisedEvaluator):
-     def __init__(self, name: str, task: SupervisedTask, meta_dataset: Union[str, tuple],
-                  test_dataset: Union[str, tuple], sample_size: int, num_iterations: int,
-                  model: Model, metrics: tuple = None, batch_size: int = 16, fields: list = None):
-         super(FewShotEvaluator, self).__init__(name, task, meta_dataset, test_dataset, model, metrics, batch_size,
-                                                fields)
-         self.sample_size = sample_size
-         self.num_iterations = num_iterations
-
-     def classify(self, x, x_test, y, cv=3, n_jobs=1):
-         stage_preds = []
-         if self.task == SupervisedTask.MULTILABEL_CLASSIFICATION:
-             for k in tqdm(range(self.num_iterations)):
-                 idx_set = set()
-                 np.random.seed(evaluation.evaluator.RANDOM_STATE + k)
-                 for yi in range(y.shape[1]):
-                     idx_set.update(
-                         np.random.choice(np.where(y[:, yi] == 1)[0], self.sample_size, replace=False).tolist())
-                 req_idx = list(idx_set)
-                 x_train, y_train = x[req_idx], y[req_idx]
-                 preds = super().classify(x_train, x_test, y_train)
-                 stage_preds.append(preds)
-             np.random.seed(evaluation.evaluator.RANDOM_STATE)
-         else:
-             skf = StratifiedKFold(n_splits=math.ceil(x.shape[0] / self.sample_size))
-             count = 0
-             for _, train in tqdm(skf.split(x, y), total=self.num_iterations):
-                 x_train, y_train = x[train], y[train]
-                 res = super().classify(x_train, x_test, y_train, cv=0)
-                 stage_preds.append(res)
-                 count += 1
-                 if count == self.num_iterations:
-                     break
-         return stage_preds
-
-     def calc_metrics(self, test, preds_list):
-         stage_results = dict()
-         for preds in preds_list:
-             res = super().calc_metrics(test, preds)
-             for k, v in res.items():
-                 if k not in stage_results:
-                     stage_results[k] = []
-                 stage_results[k].append(v)
-
-         results = {k: np.mean(v) for k, v in stage_results.items()}
-         return results
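
The multi-label branch of `classify` above builds each few-shot split by drawing `sample_size` positive rows per label column. In isolation, with toy data, that sampling step is:

    import numpy as np

    y = np.array([[1, 0], [1, 1], [0, 1], [1, 0]])  # 4 examples, 2 labels
    sample_size, idx_set = 1, set()
    for yi in range(y.shape[1]):
        idx_set.update(np.random.choice(np.where(y[:, yi] == 1)[0], sample_size, replace=False).tolist())
    print(sorted(idx_set))  # row indices of the sampled few-shot training set
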
evaluation/gpt3_encoder.py DELETED
@@ -1,30 +0,0 @@
- import os
- import openai
- import torch
- from transformers import GPT2TokenizerFast
-
-
- class GPT3Model:
-     def __init__(self, embed_model: str):
-         openai.api_key = os.getenv("OPENAI_API_KEY")
-         self.embed_model = embed_model
-         self.tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
-
-     def __call__(self, batch, batch_ids=None):
-         batch_embed = []
-         for iptext in batch:
-             try:
-                 response = openai.Embedding.create(
-                     input=iptext,
-                     model=self.embed_model
-                 )
-                 embeddings = response['data'][0]['embedding']
-                 batch_embed.append(embeddings)
-             except:
-                 response = openai.Embedding.create(
-                     input=" ".join(iptext.split(" ")[:450]),
-                     model=self.embed_model
-                 )
-                 embeddings = response['data'][0]['embedding']
-                 batch_embed.append(embeddings)
-         return torch.tensor(batch_embed)
evaluation/instructor.py DELETED
@@ -1,25 +0,0 @@
- from InstructorEmbedding import INSTRUCTOR
- from transformers import AutoTokenizer
-
- instr_format = "Represent the Scientific documents for "
-
-
- class InstructorModel:
-     def __init__(self, embed_model: str):
-         self.encoder = INSTRUCTOR(embed_model)
-         self.task_id = None
-         self.instruction_map = {"[CLF]": f"{instr_format} classification: ", "[RGN]": f"{instr_format} regression: ",
-                                 "[PRX]": f"{instr_format} retrieving similar documents: ",
-                                 "[SRCH]": {"q": "Represent the Scientific query for retrieving relevant documents: ",
-                                            "c": f"{instr_format} for retrieval: "}}
-         self.tokenizer = AutoTokenizer.from_pretrained(embed_model)
-         self.tokenizer.sep_token = self.tokenizer.eos_token
-
-     def __call__(self, batch, batch_ids=None):
-         if type(self.task_id) != dict:
-             batch = [[self.instruction_map[self.task_id], b] for b in batch]
-         else:
-             instructions = [f"{self.instruction_map['[SRCH]'][b[1]]}{batch[i]}" for i, b in enumerate(batch_ids)]
-             batch = [[ins, b] for ins, b in zip(instructions, batch)]
-         batch_embed = self.encoder.encode(batch, convert_to_numpy=False, convert_to_tensor=True, device="cuda")
-         return batch_embed
examples/classification.py DELETED
@@ -1,24 +0,0 @@
- import sys
-
- sys.path.append('../')
-
- from evaluation.encoders import Model
- from evaluation.evaluator import SupervisedEvaluator, SupervisedTask
- from adapter_fusion import AdapterEncoder
-
- # default no control codes
- # model = Model(base_checkpoint="allenai/specter")
-
- # default control codes
- # model = Model(base_checkpoint="../lightning_logs/full_run/scincl_ctrl/checkpoints/", task_id="[CLF]", use_ctrl_codes=True)
-
- # single adapters
- model = Model(base_checkpoint="malteos/scincl", variant="adapters",
-               adapters_load_from="../../phantasm/phantasm_new/lightning_logs/full_run/scincl_adapters/checkpoints/model/adapters",
-               task_id="[CLF]")
-
- evaluator = SupervisedEvaluator("biomimicry", SupervisedTask.CLASSIFICATION, ("allenai/scirepeval", "biomimicry"),
-                                 ("allenai/scirepeval_test", "biomimicry"), model, metrics=("f1",))
-
- embeddings = evaluator.generate_embeddings()
-
- evaluator.evaluate(embeddings)
examples/fewshot_classification.py DELETED
@@ -1,23 +0,0 @@
- import sys
-
- sys.path.append('../')
-
- from evaluation.encoders import Model
- from evaluation.few_shot_evaluator import FewShotEvaluator, SupervisedTask
-
- # default no control codes
- model = Model(base_checkpoint="allenai/specter")
-
- # default control codes
- # model = Model(base_checkpoint="../lightning_logs/full_run/scincl_ctrl/checkpoints/", task_id="[CLF]", use_ctrl_codes=True)
-
- # single adapters
- # model = Model(base_checkpoint="malteos/scincl", variant="adapters",
- #               adapters_load_from="../lightning_logs/full_run/scincl_adapters/checkpoints/", task_id="[CLF]")
-
- evaluator = FewShotEvaluator("drsm", SupervisedTask.CLASSIFICATION, ("allenai/scirepeval", "drsm"),
-                              ("allenai/scirepeval_test", "drsm"), model=model, metrics=("f1_macro",),
-                              sample_size=16, num_iterations=50)
-
- embeddings = evaluator.generate_embeddings()
-
- evaluator.evaluate(embeddings)
examples/regression.py DELETED
@@ -1,23 +0,0 @@
- import sys
-
- sys.path.append('../')
-
- from evaluation.encoders import Model
- from evaluation.evaluator import SupervisedEvaluator, SupervisedTask
-
- # default no control codes
- model = Model(base_checkpoint="allenai/specter")
-
- # default control codes
- # model = Model(base_checkpoint="../lightning_logs/full_run/scincl_ctrl/checkpoints/", task_id="[RGN]", use_ctrl_codes=True)
-
- # single adapters
- # model = Model(base_checkpoint="malteos/scincl", variant="adapters",
- #               adapters_load_from="../lightning_logs/full_run/scincl_adapters/checkpoints/", task_id="[RGN]")
-
- evaluator = SupervisedEvaluator("max hIndex", SupervisedTask.REGRESSION,
-                                 ("allenai/scirepeval", "peer_review_score_hIndex"),
-                                 ("allenai/scirepeval_test", "hIndex"), model, metrics=("pearsonr", "kendalltau"))
-
- embeddings = evaluator.generate_embeddings()
-
- evaluator.evaluate(embeddings)
examples/retrieval.py DELETED
@@ -1,39 +0,0 @@
- import sys
-
- sys.path.append('../')
- from evaluation.evaluator import IREvaluator
- from evaluation.encoders import Model
- from adapter_fusion import AdapterEncoder
- from reviewer_matching import ReviewerMatchingEvaluator
-
- # default no control codes
- # model = Model(base_checkpoint="allenai/specter")
-
- # default control codes
- # model = Model(base_checkpoint="../lightning_logs/full_run/scincl_ctrl/checkpoints/", task_id="[PRX]", use_ctrl_codes=True)
-
- model = Model(base_checkpoint="malteos/scincl", variant="adapters",
-               adapters_load_from="../../../phantasm/phantasm_new/lightning_logs/full_run/scincl_adapters/checkpoints/",
-               task_id="[PRX]", all_tasks=["[PRX]"])
- encoder = AdapterEncoder("malteos/scincl", ["[PRX]"],
-                          "../../../phantasm/phantasm_new/lightning_logs/full_run/scincl_adapters/checkpoints/model/adapters")
- model.encoder = encoder
- model.encoder.cuda()
- model.encoder.eval()
- evaluator = IREvaluator("feeds_1", ("allenai/scirepeval", "feeds_1"), ("allenai/scirepeval_test", "feeds_1"), model,
-                         metrics=("map", "ndcg",))
- #
- # embeddings = evaluator.generate_embeddings()
- #
- # evaluator.evaluate(embeddings)
-
- # evaluator = IREvaluator("feeds_1", ("allenai/scirepeval", "feeds_title"), ("allenai/scirepeval_test", "feeds_title"),
- #                         model, metrics=("map", "ndcg",))
- # evaluator = ReviewerMatchingEvaluator("paper reviewer evaluation", ("allenai/scirepeval", "paper_reviewer_matching"),
- #                                       ("allenai/scirepeval_test", "paper_reviewer_matching"),
- #                                       ("allenai/scirepeval_test", "reviewers"), model, metrics=("map", "ndcg",))
-
- embeddings = evaluator.generate_embeddings()
-
- evaluator.evaluate(embeddings)
full_scirepeval_tasks.jsonl DELETED
@@ -1,17 +0,0 @@
- {"name":"Feeds-1","type":"proximity","data":{"meta":{"name":"allenai/scirepeval","config":"feeds_1"},"test":{"name":"allenai/scirepeval_test","config":"feeds_1"}},"metrics":["map"]}
- {"name":"Feeds-M","type":"proximity","data":{"meta":{"name":"allenai/scirepeval","config":"feeds_m"},"test":{"name":"allenai/scirepeval_test","config":"feeds_m"}},"metrics":["map"]}
- {"name":"Highly Influential Citations","type":"proximity","data":{"meta":{"name":"allenai/scirepeval","config":"high_influence_cite"},"test":{"name":"allenai/scirepeval_test","config":"high_influence_cite"}},"metrics":["map"]}
- {"name":"SciDocs Cite","type":"proximity","data":{"simple_format":true, "meta":{"name":"allenai/scirepeval","config":"scidocs_view_cite_read"},"test":{"name":"allenai/scirepeval_test","config":"scidocs_cite"}},"embeddings":{"save":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
- {"name":"SciDocs CoCite","type":"proximity","data":{"simple_format":true, "meta":{"name":"allenai/scirepeval","config":"scidocs_view_cite_read"},"test":{"name":"allenai/scirepeval_test","config":"scidocs_cocite"}},"embeddings":{"load":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
- {"name":"Fields of study","type":"classification","data":{"meta":{"name":"allenai/scirepeval","config":"fos"},"test":{"name":"allenai/scirepeval_test","config":"fos"}},"metrics":["f1_macro"],"few_shot":[{"sample_size":10,"iterations":50},{"sample_size":5,"iterations":100}],"multi_label":true}
- {"name":"Publication Year","type":"regression","data":{"meta":{"name":"allenai/scirepeval","config":"pub_year"},"test":{"name":"allenai/scirepeval_test","config":"pub_year"}},"metrics":["kendalltau"]}
- {"name":"Search","type":"adhoc_search","data":{"meta":{"name":"allenai/scirepeval","config":"search"},"test":{"name":"allenai/scirepeval_test","config":"search"}},"fields":["title","abstract","venue","year"],"metrics":["ndcg"]}
- {"name":"Feeds Title","type":"adhoc_search","data":{"meta":{"name":"allenai/scirepeval","config":"feeds_title"},"test":{"name":"allenai/scirepeval_test","config":"feeds_title"}},"metrics":["map"]}
- {"name":"Paper-Reviewer Matching","type":"proximity","data":{"meta":{"name":"allenai/scirepeval","config":"paper_reviewer_matching"},"test":{"name":"allenai/scirepeval_test","config":"paper_reviewer_matching"},"reviewers":{"name":"allenai/scirepeval_test","config":"reviewers"}},"metrics":["P_5", "P_10"]}
- {"name":"SciDocs CoView","type":"proximity","data":{"simple_format":true, "meta":{"name":"allenai/scirepeval","config":"scidocs_view_cite_read"},"test":{"name":"allenai/scirepeval_test","config":"scidocs_view"}},"embeddings":{"load":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
- {"name":"SciDocs CoRead","type":"proximity","data":{"simple_format":true, "meta":{"name":"allenai/scirepeval","config":"scidocs_view_cite_read"},"test":{"name":"allenai/scirepeval_test","config":"scidocs_read"}},"embeddings":{"load":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
- {"name":"Peer Review Score","type":"regression","data":{"meta":{"name":"allenai/scirepeval","config":"peer_review_score_hIndex"},"test":{"name":"allenai/scirepeval_test","config":"peer_review_score"}},"embeddings":{"save":"embeddings/peer_review_score_hIndex.jsonl"},"metrics":["kendalltau"]}
- {"name":"Max hIndex","type":"regression","data":{"meta":{"name":"allenai/scirepeval","config":"peer_review_score_hIndex"},"test":{"name":"allenai/scirepeval_test","config":"hIndex"}},"embeddings":{"load":"embeddings/peer_review_score_hIndex.jsonl"},"metrics":["kendalltau"]}
- {"name":"Tweet Mentions","type":"regression","data":{"meta":{"name":"allenai/scirepeval","config":"tweet_mentions"},"test":{"name":"allenai/scirepeval_test","config":"tweet_mentions"}},"metrics":["kendalltau"]}
- {"name":"Citation Count","type":"regression","data":{"meta":{"name":"allenai/scirepeval","config":"cite_count"},"test":{"name":"allenai/scirepeval_test","config":"cite_count"}},"metrics":["kendalltau"]}
-
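
Each line above is one task specification (name, type, dataset configs, metrics). A minimal reader sketch, assuming the file is kept under its original name:

    import json

    with open("full_scirepeval_tasks.jsonl") as f:
        tasks = [json.loads(line) for line in f if line.strip()]
    print(tasks[0]["name"], tasks[0]["type"], tasks[0]["metrics"])  # Feeds-1 proximity ['map']
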
htrans/__init__.py DELETED
File without changes
htrans/act_fns.py DELETED
@@ -1,205 +0,0 @@
- # Copyright 2020 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import math
- from collections import OrderedDict
-
- import torch
- from packaging import version
- from torch import Tensor, nn
- import logging
-
- logger = logging.getLogger(__name__)
-
-
- class PytorchGELUTanh(nn.Module):
-     """
-     A fast C implementation of the tanh approximation of the GeLU activation function. See
-     https://arxiv.org/abs/1606.08415.
-
-     This implementation is equivalent to NewGELU and FastGELU but much faster. However, it is not an exact numerical
-     match due to rounding errors.
-     """
-
-     def __init__(self):
-         super().__init__()
-         if version.parse(torch.__version__) < version.parse("1.12.0"):
-             raise ImportError(
-                 f"You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use "
-                 "PytorchGELUTanh. Please upgrade torch."
-             )
-
-     def forward(self, input: Tensor) -> Tensor:
-         return nn.functional.gelu(input, approximate="tanh")
-
-
- class NewGELUActivation(nn.Module):
-     """
-     Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
-     the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
-     """
-
-     def forward(self, input: Tensor) -> Tensor:
-         return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
-
-
- class GELUActivation(nn.Module):
-     """
-     Original Implementation of the GELU activation function in Google BERT repo when initially created. For
-     information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
-     torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
-     Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
-     """
-
-     def __init__(self, use_gelu_python: bool = False):
-         super().__init__()
-         if use_gelu_python:
-             self.act = self._gelu_python
-         else:
-             self.act = nn.functional.gelu
-
-     def _gelu_python(self, input: Tensor) -> Tensor:
-         return input * 0.5 * (1.0 + torch.erf(input / math.sqrt(2.0)))
-
-     def forward(self, input: Tensor) -> Tensor:
-         return self.act(input)
-
-
- class FastGELUActivation(nn.Module):
-     """
-     Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs
-     """
-
-     def forward(self, input: Tensor) -> Tensor:
-         return 0.5 * input * (1.0 + torch.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * input)))
-
-
- class QuickGELUActivation(nn.Module):
-     """
-     Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
-     """
-
-     def forward(self, input: Tensor) -> Tensor:
-         return input * torch.sigmoid(1.702 * input)
-
-
- class ClippedGELUActivation(nn.Module):
-     """
-     Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purposes, as
-     it allows mapping negative values in the GeLU spectrum. For more information on this trick, please refer to
-     https://arxiv.org/abs/2004.09602.
-
-     Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
-     initially created.
-
-     For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 +
-     torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))). See https://arxiv.org/abs/1606.08415
-     """
-
-     def __init__(self, min: float, max: float):
-         if min > max:
-             raise ValueError(f"min should be < max (got min: {min}, max: {max})")
-
-         super().__init__()
-         self.min = min
-         self.max = max
-
-     def forward(self, x: Tensor) -> Tensor:
-         return torch.clip(gelu(x), self.min, self.max)
-
-
- class SiLUActivation(nn.Module):
-     """
-     See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear
-     Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function
-     Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated
-     Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with
-     later.
-     """
-
-     def forward(self, input: Tensor) -> Tensor:
-         return nn.functional.silu(input)
-
-
- class MishActivation(nn.Module):
-     """
-     See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra, https://arxiv.org/abs/1908.08681). Also
-     visit the official repository for the paper: https://github.com/digantamisra98/Mish
-     """
-
-     def __init__(self):
-         super().__init__()
-         if version.parse(torch.__version__) < version.parse("1.9.0"):
-             self.act = self._mish_python
-         else:
-             self.act = nn.functional.mish
-
-     def _mish_python(self, input: Tensor) -> Tensor:
-         return input * torch.tanh(nn.functional.softplus(input))
-
-     def forward(self, input: Tensor) -> Tensor:
-         return self.act(input)
-
-
- class LinearActivation(nn.Module):
-     """
-     Applies the linear activation function, i.e. forwarding input directly to output.
-     """
-
-     def forward(self, input: Tensor) -> Tensor:
-         return input
-
-
- class ClassInstantier(OrderedDict):
-     def __getitem__(self, key):
-         content = super().__getitem__(key)
-         cls, kwargs = content if isinstance(content, tuple) else (content, {})
-         return cls(**kwargs)
-
-
- ACT2CLS = {
-     "gelu": GELUActivation,
-     "gelu_10": (ClippedGELUActivation, {"min": -10, "max": 10}),
-     "gelu_fast": FastGELUActivation,
-     "gelu_new": NewGELUActivation,
-     "gelu_python": (GELUActivation, {"use_gelu_python": True}),
-     "gelu_pytorch_tanh": PytorchGELUTanh,
-     "linear": LinearActivation,
-     "mish": MishActivation,
-     "quick_gelu": QuickGELUActivation,
-     "relu": nn.ReLU,
-     "relu6": nn.ReLU6,
-     "sigmoid": nn.Sigmoid,
-     "silu": SiLUActivation,
-     "swish": SiLUActivation,
-     "tanh": nn.Tanh,
- }
- ACT2FN = ClassInstantier(ACT2CLS)
-
-
- def get_activation(activation_string):
-     if activation_string in ACT2FN:
-         return ACT2FN[activation_string]
-     else:
-         raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
-
-
- # For backwards compatibility with: from activations import gelu_python
- gelu_python = get_activation("gelu_python")
- gelu_new = get_activation("gelu_new")
- gelu = get_activation("gelu")
- gelu_fast = get_activation("gelu_fast")
- quick_gelu = get_activation("quick_gelu")
- silu = get_activation("silu")
- mish = get_activation("mish")
- linear_act = get_activation("linear")
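
A usage sketch for the registry above (assumes torch>=1.12 for the `approximate` keyword): config strings resolve to activation modules via `get_activation`, and `gelu_new` matches PyTorch's tanh-approximated GELU up to rounding:

    import torch
    from htrans.act_fns import get_activation

    act = get_activation("gelu_new")  # tanh approximation of GELU
    x = torch.randn(4)
    print(torch.allclose(act(x), torch.nn.functional.gelu(x, approximate="tanh"), atol=1e-6))  # True
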
htrans/embedding.py DELETED
@@ -1,272 +0,0 @@
1
- import math
2
- import torch
3
- import torch.nn as nn
4
- from htrans.norms import get_norm_fn
5
- from typing import Tuple
6
- from einops import repeat
7
-
8
- class EmbeddingComponent(nn.Module):
9
- """Component embedding including token embedding, positional embedding and (token type embedding)"""
10
- def __init__(self, config):
11
- # where is token type embds?
12
- super(EmbeddingComponent, self).__init__()
13
- self.token_embedding = nn.Embedding(config.vocab_size, config.emb_dim, padding_idx=config.pad_token_id)
14
- if config.pos_emb == "learned":
15
- self.positional_embedding = LearnedPositional(config.emb_dim, config.max_seq_length)
16
- elif config.pos_emb == "sinusoidal":
17
- self.positional_embedding = SinusoidalPositional(config.emb_dim, config.max_seq_length)
18
- elif config.pos_emb == "scaled-sinusoidal":
19
- self.positional_embedding = ScaledSinosoidal(config.emb_dim, config.max_seq_length)
20
- else:
21
- self.positional_embedding = None
22
-
23
- self.dropout = torch.nn.Dropout(p=config.dropout_prob)
24
- if config.normalization:
25
- self.norm = get_norm_fn(config.norm)(config.emb_dim, eps=config.norm_eps)
26
- else:
27
- self.norm = torch.nn.Identity()
28
-
29
- def forward(self, input_ids):
30
- embeds = self.token_embedding(input_ids)
31
- if self.positional_embedding is not None:
32
- embeds += self.positional_embedding(input_ids)
33
- return self.dropout(self.norm(embeds))
34
-
35
-
36
- class SinusoidalPositional(nn.Module):
37
- """
38
- The original positional embedding used in 'Attention is all you need'
39
- """
40
- def __init__(self, emb_dim, max_seq_length=512):
41
- super(SinusoidalPositional, self).__init__()
42
- pe = torch.zeros(max_seq_length, emb_dim)
43
- position = torch.arange(0, max_seq_length, dtype=torch.float).unsqueeze(1)
44
- div_term = torch.exp(torch.arange(0, emb_dim, 2) * (-math.log(10000) / emb_dim))
45
- pe[:, 0::2] = torch.sin(position * div_term)
46
- pe[:, 1::2] = torch.cos(position * div_term)
47
- # return a 3D pe so it can be broadcasting on the batch_size dimension
48
- self.register_buffer("pe", pe.unsqueeze(0), persistent=False)
49
-
50
- def forward(self, input_ids):
51
- r"""Inputs of forward function
52
- Args:
53
- input_ids: the sequence fed to the positional encoder model (required).
54
- Shape:
55
- input_ids: [batch size, sequence length]
56
- output: [batch size, sequence length, embed dim]
57
- Examples:
58
- >>> output = pos_encoder(x)
59
- """
60
- return self.pe[:, : input_ids.shape[1], :]
61
-
62
-
63
- class ScaledSinosoidal(SinusoidalPositional):
64
- """Sinusoidal with scaling (see FLASH paper)."""
65
-
66
- def __init__(self, embedding_dim, max_seq_length):
67
- super().__init__(embedding_dim, max_seq_length)
68
- self.scale_factor = torch.nn.Parameter(torch.tensor([1.0 / embedding_dim**0.5]))
69
-
70
- def forward(self, input_ids):
71
- r"""Inputs of forward function
72
- Args:
73
- x: the sequence fed to the positional encoder model (required).
74
- Shape:
75
- x: [batch size, sequence length, embed dim]
76
- output: [batch size, sequence length, embed dim]
77
- Examples:
78
- >>> output = pos_encoder(x)
79
- """
80
- return self.scale_factor * self.pe[:, : input_ids.shape[1], :]
81
-
82
- class LearnedPositional(nn.Module):
83
- """Shorthand for a learnable embedding."""
84
- def __init__(self, emb_dim, max_seq_length):
85
- super(LearnedPositional, self).__init__()
86
- self.emb = nn.Embedding(max_seq_length, emb_dim)
87
- self.register_buffer("position_ids", torch.arange(0, max_seq_length).expand(1, -1))
88
-
89
- def forward(self, input_ids):
90
- position_ids = self.position_ids[:, : input_ids.shape[1]]
91
- return self.emb(position_ids)
92
-
93
-
94
- # Code stolen from GPT-X:
95
- class Rotary(torch.nn.Module):
96
- def __init__(self, dim, base=10000, def_seq_length=128, seq_dim: int = 0):
97
- super().__init__()
98
- inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
99
- self.register_buffer("inv_freq", inv_freq, persistent=True)
100
- self.seq_len_cached = def_seq_length
101
- self.seq_dim = seq_dim
102
- cos_cache, sin_cache = self._get_cos_sin()
103
- self.register_buffer("cos_cached", cos_cache, persistent=False)
104
- self.register_buffer("sin_cached", sin_cache, persistent=False)
105
-
106
- # Force fusions on batched version
107
- def rotate_half(x: torch.Tensor):
108
- x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :] # torch.split(x, x.shape[-1] // 2, dim=-1) # not faster
109
- return torch.cat((-x2, x1), dim=-1)
110
-
111
- def rope_fn(cos: torch.Tensor, sin: torch.Tensor, query_layer: torch.Tensor, key_layer: torch.Tensor):
112
- QK = torch.cat([query_layer, key_layer], dim=1)
113
- rotated = QK * cos + rotate_half(QK) * sin
114
- return torch.split(QK, query_layer.shape[1], dim=1)
115
-
116
- self.rope_fn = rope_fn # handle fusion on module level
117
-
118
- @torch.no_grad()
119
- def get_cos_sin_cache(self, x: torch.Tensor):
120
- seq_len = x.shape[self.seq_dim]
121
- if seq_len != self.seq_len_cached:
122
- self.seq_len_cached = x.shape[self.seq_dim]
123
- cos_cache, sin_cache = self._get_cos_sin()
124
- self.cos_cached = cos_cache.to(x.device)
125
- self.sin_cached = sin_cache.to(x.device)
126
- return self.cos_cached, self.sin_cached
127
-
128
- def _get_cos_sin(self):
129
- t = torch.arange(self.seq_len_cached).type_as(self.inv_freq)
130
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
131
- emb = torch.cat((freqs, freqs), dim=-1)
132
- if self.seq_dim == 0:
133
- return emb.cos()[:, None, None, :].detach(), emb.sin()[:, None, None, :].detach()
134
- else:
135
- return emb.cos()[None, :, None, :].detach(), emb.sin()[None, :, None, :].detach()
136
-
137
- def forward(self, query_layer: torch.Tensor, key_layer: torch.Tensor):
138
- return self.rope_fn(self.cos_cached, self.sin_cached, query_layer, key_layer)
139
-
140
- @torch.jit.export
141
- def single_forward(self, inputs: torch.Tensor):
142
- """For cases where shapes of Q and K do not match."""
143
- cos, sin = self.cos_cached[: inputs.shape[0]], self.sin_cached[: inputs.shape[0]]
144
- return inputs * cos + self.rotate_half(inputs) * sin
145
-
146
- def rotate_half(self, x: torch.Tensor):
147
- x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
148
- return torch.cat((-x2, x1), dim=-1) # torch.split(x, x.shape[-1] // 2, dim=-1) # not faster
149
-
150
-
151
- class RotarySanityCheck(torch.nn.Module):
152
- """not again..."""
153
-
154
- def __init__(self, dim, base=10000, def_seq_length=128, seq_dim: int = 0):
155
- super().__init__()
156
-         inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
-         self.register_buffer("inv_freq", inv_freq, persistent=True)
-         self.seq_len_cached = def_seq_length
-         self.seq_dim = seq_dim
-         cos_cache, sin_cache = self._get_cos_sin()
-         self.register_buffer("cos_cached", cos_cache, persistent=False)
-         self.register_buffer("sin_cached", sin_cache, persistent=False)
-
-     @torch.no_grad()
-     def get_cos_sin_cache(self, x: torch.Tensor):
-         seq_len = x.shape[self.seq_dim]
-         if seq_len != self.seq_len_cached:
-             self.seq_len_cached = x.shape[self.seq_dim]
-             cos_cache, sin_cache = self._get_cos_sin()
-             self.cos_cached = cos_cache.to(x.device)
-             self.sin_cached = sin_cache.to(x.device)
-         return self.cos_cached, self.sin_cached
-
-     def _get_cos_sin(self):
-         t = torch.arange(self.seq_len_cached).type_as(self.inv_freq)
-         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
-         emb = torch.cat((freqs, freqs), dim=-1)
-         if self.seq_dim == 0:
-             return emb.cos()[:, None, None, :].detach(), emb.sin()[:, None, None, :].detach()
-         else:
-             return emb.cos()[None, :, None, :].detach(), emb.sin()[None, :, None, :].detach()
-
-     def forward(self, query_layer: torch.Tensor, key_layer: torch.Tensor):
-         # cos, sin = self.get_cos_sin_cache(key_layer)
-         # cos, sin = (cos[offset : query_layer.shape[0] + offset, ...], sin[offset : query_layer.shape[0] + offset, ...])
-         cos, sin = self.cos_cached, self.sin_cached
-         return (query_layer * cos) + (self.rotate_half(query_layer) * sin), (key_layer * cos) + (self.rotate_half(key_layer) * sin)
-
-     def rotate_half(self, x: torch.Tensor):
-         x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
-         return torch.cat((-x2, x1), dim=-1)  # torch.split(x, x.shape[-1] // 2, dim=-1)  # not faster
-
-     @torch.jit.export
-     def single_forward(self, inputs: torch.Tensor):
-         """For cases where the shapes of Q and K do not match."""
-         cos, sin = self.cos_cached[: inputs.shape[0]], self.sin_cached[: inputs.shape[0]]
-         return inputs * cos + self.rotate_half(inputs) * sin
-
-
- # Adapted from https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/rotary.py, which in turn adapted
- # https://github.com/facebookresearch/xformers/blob/main/xformers/components/positional_embedding/rotary.py
- class RotaryEleutherAI(torch.nn.Module):
-     """
-     The rotary position embeddings from RoFormer_ (Su et al.).
-     A crucial insight from the method is that the queries and keys are
-     transformed by rotation matrices which depend on the relative positions.
-     Other implementations are available in the Rotary Transformer repo_ and in
-     GPT-NeoX_; GPT-NeoX was an inspiration.
-     .. _RoFormer: https://arxiv.org/abs/2104.09864
-     .. _repo: https://github.com/ZhuiyiTechnology/roformer
-     .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox
-     """
-
-     _seq_len_cached: int
-     # _cos_cached: Optional[torch.Tensor]
-     # _sin_cached: Optional[torch.Tensor]
-
-     def __init__(self, dim_model: int, *_, **__):
-         super().__init__()
-         # Generate and save the inverse frequency buffer (non-trainable)
-         inv_freq = 1.0 / (10000 ** (torch.arange(0, dim_model, 2).float() / dim_model))
-         self.register_buffer("inv_freq", inv_freq)
-
-         _cos_cached, _sin_cached = self._update_cos_sin_tables(torch.randn(1, 128, 1), seq_dimension=-2)
-         self.register_buffer("_cos_cached", _cos_cached, persistent=False)
-         self.register_buffer("_sin_cached", _sin_cached, persistent=False)
-
-     @torch.jit.ignore
-     def _update_cos_sin_tables(self, x: torch.Tensor, seq_dimension: int = -2) -> Tuple[torch.Tensor, torch.Tensor]:
-         seq_len = x.shape[seq_dimension]
-
-         # Reset the tables if the sequence length has changed,
-         # or if we're on a new device (possibly due to tracing, for instance)
-         # if seq_len != self._seq_len_cached:  # or self._cos_cached.device != x.device or self._cos_cached.dtype != x.dtype:
-         self._seq_len_cached = seq_len
-         t = torch.arange(x.shape[seq_dimension], device=x.device, dtype=self.inv_freq.dtype)
-         # Don't do einsum, it converts fp32 to fp16
-         # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
-         freqs = torch.outer(t, self.inv_freq)
-         cos_cached = repeat(torch.cos(freqs).to(x.dtype), "... d -> ... (d 2)")
-         sin_cached = repeat(torch.sin(freqs).to(x.dtype), "... d -> ... (d 2)")
-
-         return cos_cached, sin_cached
-
-     def forward(self, q: torch.Tensor, k: torch.Tensor, seq_dimension: int = -2) -> Tuple[torch.Tensor, torch.Tensor]:
-         # assert seq_dimension in [-2, -3]  # Either (bs, h, s, d) or (bs, s, h, d)
-         # self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=seq_dimension)
-
-         return (
-             apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached, seq_dimension),
-             apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached, seq_dimension),
-         )
-
-
- def rotate_half(x: torch.Tensor):
-     x = x.unflatten(dim=-1, sizes=(-1, 2))
-     x1, x2 = x.unbind(dim=-1)
-     rotated_x = torch.stack((-x2, x1), dim=-1)
-     return rotated_x.flatten(start_dim=-2)
-
-
- @torch.jit.script
- def apply_rotary_pos_emb(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, seq_dimension: int = -2):
-     # NOTE: This could probably be moved to Triton
-
-     # Handle a possible sequence length mismatch between q and k
-     cos = cos[: x.shape[seq_dimension], :]
-     sin = sin[: x.shape[seq_dimension], :]
-     if seq_dimension == -3:
-         cos = cos[:, None, :]
-         sin = sin[:, None, :]
-     return (x * cos) + (rotate_half(x) * sin)
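
A minimal usage sketch for the rotary helpers above (the head dimension of 64, the batch/sequence sizes, and the (bs, s, h, d) layout are illustrative assumptions; the module's own imports of torch, einops.repeat and typing.Tuple are presumed in scope):

rope = RotaryEleutherAI(dim_model=64)        # cache covers 128 positions by construction
q = torch.randn(2, 16, 4, 64)                # (bs, s, h, d) layout -> seq_dimension=-3
k = torch.randn(2, 16, 4, 64)
q_rot, k_rot = rope(q, k, seq_dimension=-3)
# Each (even, odd) feature pair is rotated by a position-dependent angle, so
# per-position norms are preserved and q-k dot products depend on relative offsets.
assert torch.allclose(q_rot.norm(dim=-1), q.norm(dim=-1), atol=1e-5)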
 
htrans/model/__init__.py DELETED
@@ -1,2 +0,0 @@
- from .modeling_htrans import HTransForPreTraining, HTransModel, HTransForSequenceClassification
- from .configuration_htrans import HTransConfig
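
With these re-exports, downstream code can import from the package root; for example (assuming `htrans` is on the Python path):

from htrans.model import HTransConfig, HTransModel

model = HTransModel(HTransConfig())  # randomly initialized weights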
 
htrans/model/configuration_htrans.py DELETED
@@ -1,130 +0,0 @@
- from transformers import PretrainedConfig
-
-
- class HTransConfig(PretrainedConfig):
-     r"""
-     This is the configuration class to store the configuration of a [`HTransModel`]. It is used to instantiate the
-     hierarchical transformer according to the specified arguments, defining the model architecture. Most fields
-     mirror the BERT [bert-base-uncased](https://huggingface.co/bert-base-uncased) configuration; the `max_*_length`
-     fields define the sentence/section/document hierarchy.
-
-     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-     documentation from [`PretrainedConfig`] for more information.
-
-
-     Args:
-         vocab_size (`int`, *optional*, defaults to 32768):
-             Vocabulary size of the model. Defines the number of different tokens that can be represented by the
-             `inputs_ids` passed when calling [`HTransModel`].
-         hidden_size (`int`, *optional*, defaults to 768):
-             Dimensionality of the encoder layers and the pooler layer.
-         num_hidden_layers (`int`, *optional*, defaults to 12):
-             Number of hidden layers in the Transformer encoder.
-         num_attention_heads (`int`, *optional*, defaults to 12):
-             Number of attention heads for each attention layer in the Transformer encoder.
-         intermediate_size (`int`, *optional*, defaults to 3072):
-             Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
-         hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
-             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
-             `"relu"`, `"silu"` and `"gelu_new"` are supported.
-         hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
-             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
-         attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
-             The dropout ratio for the attention probabilities.
-         max_position_embeddings (`int`, *optional*, defaults to 512):
-             Accepted for BERT compatibility but overridden internally: the effective value is always
-             `max_sent_length * max_sec_length * max_doc_length`.
-         type_vocab_size (`int`, *optional*, defaults to 2):
-             The vocabulary size of the `token_type_ids` passed when calling [`HTransModel`].
-         initializer_range (`float`, *optional*, defaults to 0.02):
-             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
-             The epsilon used by the layer normalization layers.
-         position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
-             Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
-             positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
-             [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
-             For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
-             with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
-         is_decoder (`bool`, *optional*, defaults to `False`):
-             Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
-         use_cache (`bool`, *optional*, defaults to `True`):
-             Whether or not the model should return the last key/values attentions (not used by all models). Only
-             relevant if `config.is_decoder=True`.
-         classifier_dropout (`float`, *optional*, defaults to 0.1):
-             The dropout ratio for the classification head.
-         use_bias (`bool`, *optional*, defaults to `True`):
-             Whether the linear layers in the attention and feed-forward blocks use a bias term.
-         norm_scheme (`str`, *optional*, defaults to `"post"`):
-             Placement of layer normalization: `"post"` (after the residual addition) or `"pre"` (before each
-             sub-layer).
-         pool_scheme (`str`, *optional*, defaults to `"first-token"`):
-             How hidden states are pooled into sentence/section/document representations: `"first-token"`, `"avg"`
-             or `"max"`.
-         pos_emb (`str`, *optional*, defaults to `"sinusoidal"`):
-             Positional embedding variant: `"learned"`, `"sinusoidal"` or `"scaled-sinusoidal"`.
-         prediction_head (`bool`, *optional*, defaults to `True`):
-             Whether the LM head applies a dense transform before the decoder projection.
-         max_sent_length (`int`, *optional*, defaults to 64):
-             Maximum number of tokens per sentence segment.
-         max_sec_length (`int`, *optional*, defaults to 8):
-             Maximum number of sentence segments per section.
-         max_doc_length (`int`, *optional*, defaults to 1):
-             Maximum number of sections per document.
-
-     Examples:
-
-     ```python
-     >>> from htrans.model import HTransConfig, HTransModel
-
-     >>> # Initializing a configuration with the default 64 x 8 x 1 hierarchy
-     >>> configuration = HTransConfig()
-
-     >>> # Initializing a model (with random weights) from that configuration
-     >>> model = HTransModel(configuration)
-
-     >>> # Accessing the model configuration
-     >>> configuration = model.config
-     ```"""
-     model_type = "bert"
-
-     def __init__(
-         self,
-         vocab_size=32768,
-         hidden_size=768,
-         num_hidden_layers=12,
-         num_attention_heads=12,
-         intermediate_size=3072,
-         hidden_act="gelu",
-         hidden_dropout_prob=0.1,
-         attention_probs_dropout_prob=0.1,
-         max_position_embeddings=512,
-         type_vocab_size=2,
-         initializer_range=0.02,
-         layer_norm_eps=1e-12,
-         position_embedding_type="absolute",
-         use_cache=True,
-         classifier_dropout=0.1,
-         use_bias=True,
-         norm_scheme="post",
-         pool_scheme="first-token",
-         pos_emb="sinusoidal",
-         prediction_head=True,
-         max_sent_length=64,
-         max_sec_length=8,
-         max_doc_length=1,
-         **kwargs,
-     ):
-         super().__init__(**kwargs)
-
-         self.vocab_size = vocab_size
-         self.hidden_size = hidden_size
-         self.num_hidden_layers = num_hidden_layers
-         self.num_attention_heads = num_attention_heads
-         self.hidden_act = hidden_act
-         self.intermediate_size = intermediate_size
-         self.hidden_dropout_prob = hidden_dropout_prob
-         self.attention_probs_dropout_prob = attention_probs_dropout_prob
-         self.type_vocab_size = type_vocab_size
-         self.initializer_range = initializer_range
-         self.layer_norm_eps = layer_norm_eps
-         self.position_embedding_type = position_embedding_type
-         self.use_cache = use_cache
-         self.classifier_dropout = classifier_dropout
-         self.use_bias = use_bias
-         self.norm_scheme = norm_scheme
-         self.pool_scheme = pool_scheme
-         self.pos_emb = pos_emb
-         self.prediction_head = prediction_head
-         self.max_sec_length = max_sec_length
-         self.max_sent_length = max_sent_length
-         self.max_doc_length = max_doc_length
-         self.max_position_embeddings = max_sec_length * max_sent_length * max_doc_length
-         self.bos_token_id = kwargs.pop("bos_token_id", None)
-         self.pad_token_id = kwargs.pop("pad_token_id", None)
-         self.eos_token_id = kwargs.pop("eos_token_id", None)
-         self.sep_token_id = kwargs.pop("sep_token_id", None)
-
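
A quick sketch of how the hierarchy lengths determine the flat sequence length (the values shown are the class defaults):

from htrans.model import HTransConfig

config = HTransConfig(max_sent_length=64, max_sec_length=8, max_doc_length=1)
# max_position_embeddings is derived, not taken from the constructor argument:
assert config.max_position_embeddings == 64 * 8 * 1  # 512 tokens per flattened document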
 
htrans/model/modeling_htrans.py DELETED
@@ -1,1283 +0,0 @@
- """This is a script for modeling BERT-style hierarchical transformers under the Hugging Face scheme. Most of the code is copied directly from huggingface/transformers for reference."""
- import os
- import math
- import torch
- import logging
- import torch.nn as nn
- from ..act_fns import ACT2FN
- from transformers import PreTrainedModel
- from ..pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer, apply_chunking_to_forward
- from ..embedding import SinusoidalPositional, ScaledSinosoidal, LearnedPositional
- from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, ModelOutput, BaseModelOutputWithPoolingAndCrossAttentions, SequenceClassifierOutput
- from .configuration_htrans import HTransConfig
- from typing import Optional, Tuple, Union, List
- from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-
- logger = logging.getLogger(__name__)
-
-
- def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
-     """Load TF checkpoints into a PyTorch model."""
-     try:
-         import re
-
-         import numpy as np
-         import tensorflow as tf
-     except ImportError:
-         logger.error(
-             "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
-             "https://www.tensorflow.org/install/ for installation instructions."
-         )
-         raise
-     tf_path = os.path.abspath(tf_checkpoint_path)
-     logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
-     # Load weights from the TF model
-     init_vars = tf.train.list_variables(tf_path)
-     names = []
-     arrays = []
-     for name, shape in init_vars:
-         logger.info(f"Loading TF weight {name} with shape {shape}")
-         array = tf.train.load_variable(tf_path, name)
-         names.append(name)
-         arrays.append(array)
-
-     for name, array in zip(names, arrays):
-         name = name.split("/")
-         # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
-         # which are not required for using the pretrained model
-         if any(
-             n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
-             for n in name
-         ):
-             logger.info(f"Skipping {'/'.join(name)}")
-             continue
-         pointer = model
-         for m_name in name:
-             if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
-                 scope_names = re.split(r"_(\d+)", m_name)
-             else:
-                 scope_names = [m_name]
-             if scope_names[0] == "kernel" or scope_names[0] == "gamma":
-                 pointer = getattr(pointer, "weight")
-             elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
-                 pointer = getattr(pointer, "bias")
-             elif scope_names[0] == "output_weights":
-                 pointer = getattr(pointer, "weight")
-             elif scope_names[0] == "squad":
-                 pointer = getattr(pointer, "classifier")
-             else:
-                 try:
-                     pointer = getattr(pointer, scope_names[0])
-                 except AttributeError:
-                     logger.info(f"Skipping {'/'.join(name)}")
-                     continue
-             if len(scope_names) >= 2:
-                 num = int(scope_names[1])
-                 pointer = pointer[num]
-         if m_name[-11:] == "_embeddings":
-             pointer = getattr(pointer, "weight")
-         elif m_name == "kernel":
-             array = np.transpose(array)
-         try:
-             if pointer.shape != array.shape:
-                 raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
-         except ValueError as e:  # was `except AssertionError`, which could never catch the ValueError raised above
-             e.args += (pointer.shape, array.shape)
-             raise
-         logger.info(f"Initialize PyTorch weight {name}")
-         pointer.data = torch.from_numpy(array)
-     return model
-
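
A hedged usage sketch for the converter above (the checkpoint prefix is a placeholder, not a real artifact):

config = HTransConfig()
model = HTransModel(config)
# "path/to/bert_model.ckpt" stands in for an actual TF 1.x checkpoint prefix
model = load_tf_weights_in_bert(model, config, "path/to/bert_model.ckpt")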
- class PositionEmbeddings(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         if config.pos_emb == "learned":
-             self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
-         elif config.pos_emb == "sinusoidal":
-             self.position_embeddings = SinusoidalPositional(config.hidden_size, config.max_position_embeddings)
-         elif config.pos_emb == "scaled-sinusoidal":
-             self.position_embeddings = ScaledSinosoidal(config.hidden_size, config.max_position_embeddings)
-         else:
-             raise NotImplementedError(f"Positional embedding {config.pos_emb} is not a valid choice")
-         self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
-         self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
-         self.dropout = nn.Dropout(config.hidden_dropout_prob)
-         self.register_buffer("sec_position_ids", torch.arange(config.max_sec_length * config.max_doc_length + config.max_doc_length).expand((1, -1)))
-         self.register_buffer("doc_position_ids", torch.arange(config.max_doc_length).expand((1, -1)))
-
-     def forward(self, embeddings, hierarchy="sec"):
-         if self.position_embedding_type == "absolute":
-             position_embeddings = self.position_embeddings(self.sec_position_ids if hierarchy == "sec" else self.doc_position_ids)
-             embeddings += position_embeddings
-         embeddings = self.LayerNorm(embeddings)
-         embeddings = self.dropout(embeddings)
-         return embeddings
-
- class HTransEmbeddings(nn.Module):
-     """Construct the embeddings from word, position and token_type embeddings."""
-
-     def __init__(self, config):
-         super().__init__()
-         self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
-         if config.pos_emb == "learned":
-             self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
-         elif config.pos_emb == "sinusoidal":
-             self.position_embeddings = SinusoidalPositional(config.hidden_size, config.max_position_embeddings)
-         elif config.pos_emb == "scaled-sinusoidal":
-             self.position_embeddings = ScaledSinosoidal(config.hidden_size, config.max_position_embeddings)
-         else:
-             raise NotImplementedError(f"Positional embedding {config.pos_emb} is not a valid choice")
-         # self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
-         # For now we don't use token type embeddings, but we might need them back in the future
-         # self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
-
-         # self.LayerNorm is not snake-cased to stick with the TensorFlow model variable name and be able to load
-         # any TensorFlow checkpoint file
-         self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
-         self.dropout = nn.Dropout(config.hidden_dropout_prob)
-         # position_ids (1, len position emb) is contiguous in memory and exported when serialized
-         self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
-         self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
-         self.register_buffer(
-             "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
-         )
-
-     def forward(
-         self,
-         input_ids: Optional[torch.LongTensor] = None,
-         token_type_ids: Optional[torch.LongTensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         past_key_values_length: int = 0,
-     ) -> torch.Tensor:
-         if input_ids is not None:
-             input_shape = input_ids.size()
-         else:
-             input_shape = inputs_embeds.size()[:-1]
-
-         seq_length = input_shape[1]
-
-         if position_ids is None:
-             position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
-
-         # Falling back to the registered all-zeros token_type_ids buffer from the constructor usually happens when
-         # they are auto-generated; the registered buffer helps users trace the model without passing token_type_ids
-         # (solves issue #5664)
-         # if token_type_ids is None:
-         #     if hasattr(self, "token_type_ids"):
-         #         buffered_token_type_ids = self.token_type_ids[:, :seq_length]
-         #         buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
-         #         token_type_ids = buffered_token_type_ids_expanded
-         #     else:
-         #         token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
-
-         if inputs_embeds is None:
-             inputs_embeds = self.word_embeddings(input_ids)
-         # token_type_embeddings = self.token_type_embeddings(token_type_ids)
-
-         # embeddings = inputs_embeds + token_type_embeddings
-         embeddings = inputs_embeds
-         if self.position_embedding_type == "absolute":
-             position_embeddings = self.position_embeddings(position_ids)
-             embeddings += position_embeddings
-         embeddings = self.LayerNorm(embeddings)
-         embeddings = self.dropout(embeddings)
-         return embeddings
-
-
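
A shape-level sketch of the embedding stage (batch size and hierarchy sizes are illustrative):

config = HTransConfig(max_sent_length=64, max_sec_length=8, max_doc_length=1)
embeddings = HTransEmbeddings(config)
input_ids = torch.zeros(2, 512, dtype=torch.long)  # 8 sentences x 64 tokens, flattened
hidden = embeddings(input_ids=input_ids)           # -> (2, 512, 768)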
- class HTransSelfAttention(nn.Module):
-     def __init__(self, config, position_embedding_type=None, sent_length=512, sec_length=1, doc_length=1):
-         super().__init__()
-         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
-             raise ValueError(
-                 f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
-                 f"heads ({config.num_attention_heads})"
-             )
-
-         self.num_attention_heads = config.num_attention_heads
-         self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
-         self.all_head_size = self.num_attention_heads * self.attention_head_size
-
-         self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
-         self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
-         self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
-
-         self.sent_length = sent_length
-         self.sec_length = sec_length
-         self.doc_length = doc_length
-
-         self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
-         self.position_embedding_type = position_embedding_type or getattr(
-             config, "position_embedding_type", "absolute"
-         )
-         if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
-             self.max_position_embeddings = config.max_position_embeddings
-             self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
-
-         self.is_decoder = config.is_decoder
-
-     def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
-         if self.sec_length == 1:
-             new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
-             x = x.view(new_x_shape)
-             return x.permute(0, 2, 1, 3)
-         elif self.doc_length == 1:
-             new_x_shape = x.size()[:-2] + (self.sec_length, self.sent_length, self.num_attention_heads, self.attention_head_size)
-             x = x.view(new_x_shape)
-             return x.permute(0, 1, 3, 2, 4)
-         else:
-             new_x_shape = x.size()[:-2] + (self.doc_length, self.sec_length, self.sent_length, self.num_attention_heads, self.attention_head_size)
-             x = x.view(new_x_shape)
-             return x.permute(0, 1, 2, 4, 3, 5)
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-         attention_mask: Optional[torch.FloatTensor] = None,
-         head_mask: Optional[torch.FloatTensor] = None,
-         encoder_hidden_states: Optional[torch.FloatTensor] = None,
-         encoder_attention_mask: Optional[torch.FloatTensor] = None,
-         past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
-         output_attentions: Optional[bool] = False,
-     ) -> Tuple[torch.Tensor]:
-         mixed_query_layer = self.query(hidden_states)
-
-         # If this is instantiated as a cross-attention module, the keys
-         # and values come from an encoder; the attention mask needs to be
-         # such that the encoder's padding tokens are not attended to.
-         is_cross_attention = encoder_hidden_states is not None
-
-         if is_cross_attention and past_key_value is not None:
-             # reuse k,v, cross_attentions
-             key_layer = past_key_value[0]
-             value_layer = past_key_value[1]
-             attention_mask = encoder_attention_mask
-         elif is_cross_attention:
-             key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
-             value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
-             attention_mask = encoder_attention_mask
-         elif past_key_value is not None:
-             key_layer = self.transpose_for_scores(self.key(hidden_states))
-             value_layer = self.transpose_for_scores(self.value(hidden_states))
-             key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
-             value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
-         else:
-             key_layer = self.transpose_for_scores(self.key(hidden_states))
-             value_layer = self.transpose_for_scores(self.value(hidden_states))
-
-         query_layer = self.transpose_for_scores(mixed_query_layer)
-
-         use_cache = past_key_value is not None
-         if self.is_decoder:
-             # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
-             # Further calls to cross_attention layer can then reuse all cross-attention
-             # key/value_states (first "if" case)
-             # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
-             # all previous decoder key/value_states. Further calls to uni-directional self-attention
-             # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
-             # if encoder bi-directional self-attention `past_key_value` is always `None`
-             past_key_value = (key_layer, value_layer)
-
-         # Take the dot product between "query" and "key" to get the raw attention scores.
-         attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
-         # if self.seg_num == 1:
-         #     attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
-         # else:
-         #     attention_scores = torch.concatenate([torch.matmul(query_layer[:, :, j * self.seg_length: (j + 1) * self.seg_length, :], key_layer[:, :, j * self.seg_length: (j + 1) * self.seg_length, :].transpose(-1, -2)) for j in range(self.seg_num)], dim=-1)
-         if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
-             # TODO: relative positional embedding for hierarchical attention
-             query_length, key_length = query_layer.shape[2], key_layer.shape[2]
-             if use_cache:
-                 position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
-                     -1, 1
-                 )
-             else:
-                 position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
-             position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
-             distance = position_ids_l - position_ids_r
-
-             positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
-             positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
-
-             if self.position_embedding_type == "relative_key":
-                 relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
-                 attention_scores = attention_scores + relative_position_scores
-             elif self.position_embedding_type == "relative_key_query":
-                 relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
-                 relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
-                 attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
-         attention_scores = attention_scores / math.sqrt(self.attention_head_size)
-         if attention_mask is not None:
-             if self.sec_length > 1:
-                 if self.doc_length > 1:
-                     new_mask_shape = (attention_mask.shape[0], self.doc_length, self.sec_length, 1, 1, self.sent_length)
-                 else:
-                     new_mask_shape = (attention_mask.shape[0], self.sec_length, 1, 1, self.sent_length)
-                 attention_mask = attention_mask.view(new_mask_shape)
-             # Apply the attention mask (precomputed for all layers in the model's forward() function)
-             attention_scores = attention_scores + attention_mask
-
-         # Normalize the attention scores to probabilities.
-         attention_probs = nn.functional.softmax(attention_scores, dim=-1)
-
-         # This is actually dropping out entire tokens to attend to, which might
-         # seem a bit unusual, but is taken from the original Transformer paper.
-         attention_probs = self.dropout(attention_probs)
-
-         # Mask heads if we want to
-         if head_mask is not None:
-             if self.sec_length > 1:
-                 if self.doc_length > 1:
-                     new_mask_shape = (head_mask.shape[0], self.doc_length, self.sec_length, 1, 1, self.sent_length)
-                 else:
-                     new_mask_shape = (head_mask.shape[0], self.sec_length, 1, 1, self.sent_length)
-                 head_mask = head_mask.view(new_mask_shape)
-             attention_probs = attention_probs * head_mask
-
-         context_layer = torch.matmul(attention_probs, value_layer)
-         if self.doc_length > 1:
-             context_layer = context_layer.permute(0, 1, 2, 4, 3, 5).contiguous()
-             new_context_layer_shape = context_layer.size()[:-5] + (self.doc_length * self.sec_length * self.sent_length, self.all_head_size,)
-         elif self.sec_length > 1:
-             context_layer = context_layer.permute(0, 1, 3, 2, 4).contiguous()
-             new_context_layer_shape = context_layer.size()[:-4] + (self.sec_length * self.sent_length, self.all_head_size,)
-         else:
-             context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
-             new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
-
-         context_layer = context_layer.view(new_context_layer_shape)
-
-         outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
-
-         if self.is_decoder:
-             outputs = outputs + (past_key_value,)
-         return outputs
-
-
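
A shape sketch of what `transpose_for_scores` does in the hierarchical case (sizes illustrative; 12 heads of dimension 64): the flat sequence is folded so that attention scores are only ever computed within one sentence.

x = torch.randn(2, 8 * 64, 768)                      # (bs, sec * sent, hidden)
x = x.view(2, 8, 64, 12, 64).permute(0, 1, 3, 2, 4)  # (bs, sec, heads, sent, head_dim)
scores = torch.matmul(x, x.transpose(-1, -2))        # (bs, sec, heads, sent, sent): per-sentence attention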
- class HTransSelfOutput(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=config.use_bias)
-         if config.norm_scheme == "post":
-             self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
-         self.dropout = nn.Dropout(config.hidden_dropout_prob)
-         self.norm_scheme = config.norm_scheme
-
-     def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
-         hidden_states = self.dense(hidden_states)
-         hidden_states = self.dropout(hidden_states)
-         hidden_states = hidden_states + input_tensor
-         if self.norm_scheme == "post":
-             hidden_states = self.LayerNorm(hidden_states)
-
-         return hidden_states
-
-
- class HTransAttention(nn.Module):
-     def __init__(self, config, position_embedding_type=None, sent_length=512, sec_length=1, doc_length=1):
-         super().__init__()
-         self.self = HTransSelfAttention(config, position_embedding_type=position_embedding_type, sent_length=sent_length, sec_length=sec_length, doc_length=doc_length)
-         self.output = HTransSelfOutput(config)
-         self.pruned_heads = set()
-         self.norm_scheme = config.norm_scheme
-         if self.norm_scheme == "pre":
-             self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
-
-     def prune_heads(self, heads):
-         if len(heads) == 0:
-             return
-         heads, index = find_pruneable_heads_and_indices(
-             heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
-         )
-
-         # Prune linear layers
-         self.self.query = prune_linear_layer(self.self.query, index)
-         self.self.key = prune_linear_layer(self.self.key, index)
-         self.self.value = prune_linear_layer(self.self.value, index)
-         self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
-
-         # Update hyper params and store pruned heads
-         self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
-         self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
-         self.pruned_heads = self.pruned_heads.union(heads)
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-         attention_mask: Optional[torch.FloatTensor] = None,
-         head_mask: Optional[torch.FloatTensor] = None,
-         encoder_hidden_states: Optional[torch.FloatTensor] = None,
-         encoder_attention_mask: Optional[torch.FloatTensor] = None,
-         past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
-         output_attentions: Optional[bool] = False,
-     ) -> Tuple[torch.Tensor]:
-         input_tensors = hidden_states
-         if self.norm_scheme == "pre":
-             hidden_states = self.LayerNorm(hidden_states)
-         self_outputs = self.self(
-             hidden_states,
-             attention_mask,
-             head_mask,
-             encoder_hidden_states,
-             encoder_attention_mask,
-             past_key_value,
-             output_attentions,
-         )
-         attention_output = self.output(self_outputs[0], input_tensors)
-         outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
-         return outputs
-
-
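
The `norm_scheme` flag threaded through the blocks above switches between the two standard residual orderings; schematically (a sketch, not code from this repo):

def residual_block(x, sublayer, norm, scheme="post"):
    # post-norm (BERT-style): normalize after the residual addition
    # pre-norm: normalize the input of each sub-layer instead
    if scheme == "pre":
        return x + sublayer(norm(x))
    return norm(x + sublayer(x))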
- class HTransIntermediate(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.dense = nn.Linear(config.hidden_size, config.intermediate_size, bias=config.use_bias)
-         self.norm_scheme = config.norm_scheme
-         if self.norm_scheme == "pre":
-             self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
-         if isinstance(config.hidden_act, str):
-             self.intermediate_act_fn = ACT2FN[config.hidden_act]
-         else:
-             self.intermediate_act_fn = config.hidden_act
-
-     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-         if self.norm_scheme == "pre":
-             hidden_states = self.LayerNorm(hidden_states)
-         hidden_states = self.dense(hidden_states)
-         hidden_states = self.intermediate_act_fn(hidden_states)
-         return hidden_states
-
-
- class HTransOutput(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.dense = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.use_bias)
-         self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
-         self.dropout = nn.Dropout(config.hidden_dropout_prob)
-         self.norm_scheme = config.norm_scheme
-
-     def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
-         hidden_states = self.dense(hidden_states)
-         hidden_states = self.dropout(hidden_states)
-         hidden_states = hidden_states + input_tensor
-         if self.norm_scheme == "post":
-             hidden_states = self.LayerNorm(hidden_states)
-         return hidden_states
-
-
- class HTransLayer(nn.Module):
-     def __init__(self, config, sent_length=512, sec_length=1, doc_length=1):
-         super().__init__()
-         self.chunk_size_feed_forward = config.chunk_size_feed_forward
-         self.seq_len_dim = 1
-         self.attention = HTransAttention(config, sent_length=sent_length, sec_length=sec_length, doc_length=doc_length)
-         self.is_decoder = config.is_decoder
-         self.add_cross_attention = config.add_cross_attention
-         if self.add_cross_attention:
-             if not self.is_decoder:
-                 raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
-             self.crossattention = HTransAttention(config, position_embedding_type="absolute", sent_length=sent_length, sec_length=sec_length, doc_length=doc_length)
-         self.intermediate = HTransIntermediate(config)
-         self.output = HTransOutput(config)
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-         attention_mask: Optional[torch.FloatTensor] = None,
-         head_mask: Optional[torch.FloatTensor] = None,
-         encoder_hidden_states: Optional[torch.FloatTensor] = None,
-         encoder_attention_mask: Optional[torch.FloatTensor] = None,
-         past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
-         output_attentions: Optional[bool] = False,
-     ) -> Tuple[torch.Tensor]:
-         # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
-         self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
-         self_attention_outputs = self.attention(
-             hidden_states,
-             attention_mask,
-             head_mask,
-             output_attentions=output_attentions,
-             past_key_value=self_attn_past_key_value,
-         )
-         attention_output = self_attention_outputs[0]
-
-         # if decoder, the last output is a tuple of self-attn cache
-         if self.is_decoder:
-             outputs = self_attention_outputs[1:-1]
-             present_key_value = self_attention_outputs[-1]
-         else:
-             outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
-
-         cross_attn_present_key_value = None
-         if self.is_decoder and encoder_hidden_states is not None:
-             if not hasattr(self, "crossattention"):
-                 raise ValueError(
-                     f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
-                     " by setting `config.add_cross_attention=True`"
-                 )
-
-             # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
-             cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
-             cross_attention_outputs = self.crossattention(
-                 attention_output,
-                 attention_mask,
-                 head_mask,
-                 encoder_hidden_states,
-                 encoder_attention_mask,
-                 cross_attn_past_key_value,
-                 output_attentions,
-             )
-             attention_output = cross_attention_outputs[0]
-             outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
-
-             # add cross-attn cache to positions 3,4 of present_key_value tuple
-             cross_attn_present_key_value = cross_attention_outputs[-1]
-             present_key_value = present_key_value + cross_attn_present_key_value
-
-         layer_output = apply_chunking_to_forward(
-             self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
-         )
-         outputs = (layer_output,) + outputs
-
-         # if decoder, return the attn key/values as the last output
-         if self.is_decoder:
-             outputs = outputs + (present_key_value,)
-
-         return outputs
-
-     def feed_forward_chunk(self, attention_output):
-         intermediate_output = self.intermediate(attention_output)
-         layer_output = self.output(intermediate_output, attention_output)
-         return layer_output
-
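
`apply_chunking_to_forward` above trades peak memory for extra kernel launches by slicing the sequence dimension; a simplified sketch of its behavior (the real helper lives in `htrans/pytorch_utils.py`):

def chunked_ffn(ffn, chunk_size, seq_dim, x):
    # Equivalent to ffn(x) when chunk_size == 0; otherwise the sequence is
    # processed in slices to bound peak activation memory.
    if chunk_size == 0:
        return ffn(x)
    return torch.cat([ffn(c) for c in x.split(chunk_size, dim=seq_dim)], dim=seq_dim)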
- # class HTransLayer(nn.Module):
- #     def __init__(self, config):
- #         super().__init__()
- #         self.sent_trans_layer_1 = TransformerLayer(config)
- #         self.sent_trans_layer_2 = TransformerLayer(config)
- #         self.sec_trans_layer = TransformerLayer(config)
- #         self.max_sent_length = config.max_sent_length
- #         self.max_sec_length = config.max_sec_length
- #
- #     def forward(
- #         self,
- #         hidden_states: torch.Tensor,
- #         attention_mask: Optional[torch.FloatTensor] = None,
- #         head_mask: Optional[torch.FloatTensor] = None,
- #         encoder_hidden_states: Optional[torch.FloatTensor] = None,
- #         encoder_attention_mask: Optional[torch.FloatTensor] = None,
- #         past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
- #         output_attentions: Optional[bool] = False,
- #     ) -> Tuple[torch.Tensor]:
- #         # TODO: adapt head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value
- #         sent_outputs = [self.sent_trans_layer_1(
- #             hidden_states[:, i*self.max_sent_length: (i+1)*self.max_sent_length, :],
- #             attention_mask[:, :, :, i*self.max_sent_length: (i+1)*self.max_sent_length] if attention_mask is not None else None,
- #             output_attentions=output_attentions
- #         )[0] for i in range(self.max_sec_length)]
- #         sec_outputs = self.sec_trans_layer(torch.concatenate([i[:, 0:1, :] for i in sent_outputs], axis=1))[0]
- #         hidden_sec_states = hidden_states.clone()
- #         hidden_sec_states[:, [i*self.max_sent_length for i in range(self.max_sec_length)]] = sec_outputs
- #         layer_outputs = [self.sent_trans_layer_2(
- #             hidden_sec_states[:, i * self.max_sent_length: (i + 1) * self.max_sent_length, :],
- #             attention_mask[:, :, :,
- #                            i * self.max_sent_length: (i + 1) * self.max_sent_length] if attention_mask is not None else None,
- #             output_attentions=output_attentions
- #         )[0] for i in range(self.max_sec_length)]
- #         return (torch.concatenate(layer_outputs, axis=1), )
-
- class HTransEncoder(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.config = config
-         self.hi_position_embeddings = PositionEmbeddings(config)
-         self.sent_layer = nn.ModuleList([HTransLayer(config, sent_length=config.max_sent_length, sec_length=config.max_sec_length, doc_length=config.max_doc_length) for _ in range(config.num_hidden_layers)])
-         if config.max_doc_length > 1:
-             self.doc_layer = nn.ModuleList(
-                 [HTransLayer(config, sent_length=config.max_doc_length) for _ in range(config.num_hidden_layers)])
-         self.sec_layer = nn.ModuleList([HTransLayer(config, sent_length=self.config.max_sec_length + 1, sec_length=config.max_doc_length) for _ in range(config.num_hidden_layers)])
-         self.gradient_checkpointing = False
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-         attention_mask: Optional[torch.FloatTensor] = None,
-         head_mask: Optional[torch.FloatTensor] = None,
-         sec_mask: Optional[torch.FloatTensor] = None,
-         doc_mask: Optional[torch.FloatTensor] = None,
-         encoder_hidden_states: Optional[torch.FloatTensor] = None,
-         encoder_attention_mask: Optional[torch.FloatTensor] = None,
-         past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = False,
-         output_hidden_states: Optional[bool] = False,
-         return_dict: Optional[bool] = True,
-         sec_head_emb: Optional[torch.Tensor] = None,
-         doc_head_emb: Optional[torch.Tensor] = None,
-     ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
-         all_hidden_states = () if output_hidden_states else None
-         all_self_attentions = () if output_attentions else None
-         all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
-
-         next_decoder_cache = () if use_cache else None
-         if self.config.max_doc_length > 1:
-             sec_head_emb = sec_head_emb.unsqueeze(1).expand(-1, self.config.max_doc_length, -1, -1)
-             sec_new_shape = (hidden_states.shape[0], self.config.max_doc_length, self.config.max_sec_length, hidden_states.shape[-1])
-
-         for i, layer_module in enumerate(self.sent_layer):
-             if output_hidden_states:
-                 all_hidden_states = all_hidden_states + (hidden_states,)
-
-             layer_head_mask = head_mask[i] if head_mask is not None else None
-             past_key_value = past_key_values[i] if past_key_values is not None else None
-
-             if self.gradient_checkpointing and self.training:
-                 # TODO: add gradient checkpointing support for hierarchical attention
-                 if use_cache:
-                     logger.warning(
-                         "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
-                     )
-                     use_cache = False
-
-                 def create_custom_forward(module):
-                     def custom_forward(*inputs):
-                         return module(*inputs, past_key_value, output_attentions)
-
-                     return custom_forward
-
-                 layer_outputs = torch.utils.checkpoint.checkpoint(
-                     create_custom_forward(layer_module),
-                     hidden_states,
-                     attention_mask,
-                     layer_head_mask,
-                     encoder_hidden_states,
-                     encoder_attention_mask,
-                 )
-             else:
-                 layer_outputs = layer_module(
-                     hidden_states,
-                     attention_mask,
-                     layer_head_mask,
-                     encoder_hidden_states,
-                     encoder_attention_mask,
-                     past_key_value,
-                     output_attentions,
-                 )
-
-             hidden_states = layer_outputs[0]
-             if self.config.pool_scheme == "first-token":
-                 # sec_inputs = torch.select(hidden_states, -2, 0)
-                 sec_inputs = hidden_states[:, range(0, self.config.max_doc_length * self.config.max_sec_length * self.config.max_sent_length, self.config.max_sent_length), :]
-             elif self.config.pool_scheme == "avg":
-                 sec_inputs = torch.mean(hidden_states.view((hidden_states.shape[0], self.config.max_sec_length, self.config.max_sent_length, hidden_states.shape[-1])), dim=-2)
-             elif self.config.pool_scheme == "max":
-                 sec_inputs = torch.max(hidden_states.view((hidden_states.shape[0], self.config.max_sec_length, self.config.max_sent_length, hidden_states.shape[-1])), dim=-2)[0]
-             else:
-                 raise NotImplementedError(f"Pooling method {self.config.pool_scheme} is not implemented")
-
-             if self.config.max_doc_length > 1:
-                 sec_inputs = torch.concat(
-                     [sec_head_emb,
-                      sec_inputs.view(sec_new_shape)],
-                     dim=-2)
-             else:
-                 sec_inputs = torch.concat([sec_head_emb, sec_inputs], dim=-2)
-             sec_outputs = self.sec_layer[i](self.hi_position_embeddings(sec_inputs.view(hidden_states.shape[0], -1, hidden_states.shape[-1]), "sec"), attention_mask=sec_mask)[0]
-             if self.config.max_doc_length > 1:
-                 doc_inputs, token_head_embedding = torch.split(sec_outputs.view(hidden_states.shape[0], self.config.max_doc_length, self.config.max_sec_length + 1, hidden_states.shape[-1]), (1, self.config.max_sec_length), -2)
-                 doc_inputs = doc_inputs.squeeze(-2).clone()
-             else:
-                 token_head_embedding = sec_outputs[:, 1:, :]
-             hidden_sec_states = hidden_states.clone()
-             hidden_sec_states[:, range(0, self.config.max_sec_length * self.config.max_sent_length * self.config.max_doc_length, self.config.max_sent_length),
-                               :] = token_head_embedding.contiguous().view(hidden_states.shape[0], self.config.max_sec_length * self.config.max_doc_length, hidden_states.shape[-1])
-             hidden_states = hidden_sec_states
-
-             if self.config.max_doc_length > 1:
-                 doc_outputs = self.doc_layer[i](self.hi_position_embeddings(doc_inputs, "doc"), attention_mask=doc_mask)[0]
-                 sec_head_emb = doc_outputs.unsqueeze(-2)
-
-             if use_cache:
-                 next_decoder_cache += (layer_outputs[-1],)
-             if output_attentions:
-                 all_self_attentions = all_self_attentions + (layer_outputs[1],)
-                 if self.config.add_cross_attention:
-                     all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
-
-         if output_hidden_states:
-             all_hidden_states = all_hidden_states + (hidden_states,)
-
-         if not return_dict:
-             return tuple(
-                 v
-                 for v in [
-                     hidden_states,
-                     next_decoder_cache,
-                     all_hidden_states,
-                     all_self_attentions,
-                     all_cross_attentions,
-                 ]
-                 if v is not None
-             )
-         return BaseModelOutputWithPastAndCrossAttentions(
-             last_hidden_state=hidden_states,
-             past_key_values=next_decoder_cache,
-             hidden_states=all_hidden_states,
-             attentions=all_self_attentions,
-             cross_attentions=all_cross_attentions,
-         )
-
-
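
A shape-level sketch of one encoder iteration in the `doc_length == 1` case (sizes illustrative): token attention runs within each sentence, the first token of every sentence serves as its head, the heads plus a prepended section-level head attend to each other in `sec_layer[i]`, and the refreshed heads are scattered back before the next layer.

bs, sec, sent, hidden = 2, 8, 64, 768
states = torch.randn(bs, sec * sent, hidden)   # flattened (sentence, token) grid
head_idx = range(0, sec * sent, sent)          # index of each sentence's first token
sec_inputs = states[:, head_idx, :]            # (bs, 8, 768): the sentence heads
# sec_layer[i] sees [sec_head_emb; sec_inputs], and its outputs[:, 1:, :]
# are written back into `states` at head_idx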
- class HTransPooler(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
-         self.activation = nn.Tanh()
-         self.pool_scheme = config.pool_scheme
-
-     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-         # We "pool" the model by taking the hidden state of the first token
-         # (or the mean/max over tokens, depending on `pool_scheme`).
-         if self.pool_scheme == "first-token":
-             first_token_tensor = hidden_states[:, 0]
-         elif self.pool_scheme == "avg":
-             first_token_tensor = hidden_states.mean(dim=1)
-         elif self.pool_scheme == "max":
-             first_token_tensor = hidden_states.max(dim=1)[0]
-         else:
-             raise NotImplementedError(f"{self.pool_scheme} is not a valid pooling scheme")  # was `NotImplemented`, which is not an exception
-         pooled_output = self.dense(first_token_tensor)
-         pooled_output = self.activation(pooled_output)
-         return pooled_output
-
-
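
The three pooling schemes reduce `(batch, seq, hidden)` to `(batch, hidden)` before the dense + tanh projection; illustratively:

h = torch.randn(2, 512, 768)
first = h[:, 0]          # "first-token"
avg = h.mean(dim=1)      # "avg"
mx = h.max(dim=1)[0]     # "max"; all three are (2, 768)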
- class HTransPredictionHeadTransform(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
-         if isinstance(config.hidden_act, str):
-             self.transform_act_fn = ACT2FN[config.hidden_act]
-         else:
-             self.transform_act_fn = config.hidden_act
-         self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
-
-     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-         hidden_states = self.dense(hidden_states)
-         hidden_states = self.transform_act_fn(hidden_states)
-         hidden_states = self.LayerNorm(hidden_states)
-         return hidden_states
-
-
- class HTransLMPredictionHead(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.prediction_head = config.prediction_head
-         if self.prediction_head:
-             self.transform = HTransPredictionHeadTransform(config)
-
-         # The output weights are the same as the input embeddings, but there is
-         # an output-only bias for each token.
-         self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
-         self.bias = nn.Parameter(torch.zeros(config.vocab_size))
-         self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
-         # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
-         self.decoder.bias = self.bias
-
-     def forward(self, hidden_states):
-         hidden_states = self.LayerNorm(hidden_states)
-         if self.prediction_head:
-             hidden_states = self.transform(hidden_states)
-         hidden_states = self.decoder(hidden_states)
-         return hidden_states
-
-
- class HTransOnlyMLMHead(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.predictions = HTransLMPredictionHead(config)
-
-     def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
-         prediction_scores = self.predictions(sequence_output)
-         return prediction_scores
-
-
- class HTransOnlyNSPHead(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.seq_relationship = nn.Linear(config.hidden_size, 2)
-
-     def forward(self, pooled_output):
-         seq_relationship_score = self.seq_relationship(pooled_output)
-         return seq_relationship_score
-
-
- class HTransPreTrainingHeads(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.predictions = HTransLMPredictionHead(config)
-         self.seq_relationship = nn.Linear(config.hidden_size, 2)
-
-     def forward(self, sequence_output, pooled_output):
-         prediction_scores = self.predictions(sequence_output)
-         seq_relationship_score = self.seq_relationship(pooled_output)
-         return prediction_scores, seq_relationship_score
-
-
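
A small usage sketch for the pre-training heads above (batch and sequence sizes are illustrative):

config = HTransConfig()
heads = HTransPreTrainingHeads(config)
sequence_output = torch.randn(2, 512, config.hidden_size)
pooled_output = torch.randn(2, config.hidden_size)
mlm_logits, nsp_logits = heads(sequence_output, pooled_output)
# mlm_logits: (2, 512, vocab_size); nsp_logits: (2, 2)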
- class HTransPreTrainedModel(PreTrainedModel):
-     """
-     An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
-     models.
-     """
-
-     config_class = HTransConfig
-     load_tf_weights = load_tf_weights_in_bert
-     base_model_prefix = "bert"
-     supports_gradient_checkpointing = True
-     _keys_to_ignore_on_load_missing = [r"position_ids"]
-
-     def _init_weights(self, module):
-         """Initialize the weights"""
-         if isinstance(module, nn.Linear):
-             # Slightly different from the TF version, which uses truncated_normal for initialization
-             # cf https://github.com/pytorch/pytorch/pull/5617
-             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
-             if module.bias is not None:
-                 module.bias.data.zero_()
-         elif isinstance(module, nn.Embedding):
-             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
-             if module.padding_idx is not None:
-                 module.weight.data[module.padding_idx].zero_()
-         elif isinstance(module, nn.LayerNorm):
-             module.bias.data.zero_()
-             module.weight.data.fill_(1.0)
-
-     def _set_gradient_checkpointing(self, module, value=False):
-         if isinstance(module, HTransEncoder):
-             module.gradient_checkpointing = value
-
-
- class HTransForPreTrainingOutput(ModelOutput):
-     """
-     Output type of [`HTransForPreTraining`].
-
-     Args:
-         loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
-             Total loss as the sum of the masked language modeling loss and the next sequence prediction
-             (classification) loss.
-         prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
-             Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
-         seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
-             Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
-             before SoftMax).
-         hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
-             Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
-             shape `(batch_size, sequence_length, hidden_size)`.
-
-             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-         attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
-             Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
-             sequence_length)`.
-
-             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
-             heads.
-     """
-
-     loss: Optional[torch.FloatTensor] = None
-     prediction_logits: torch.FloatTensor = None
-     seq_relationship_logits: torch.FloatTensor = None
-     hidden_states: Optional[Tuple[torch.FloatTensor]] = None
-     attentions: Optional[Tuple[torch.FloatTensor]] = None
-
-
- class HTransModel(HTransPreTrainedModel):
-     """
-
-     The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
-     cross-attention is added between the self-attention layers, following the architecture described in [Attention is
-     all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
-     Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
-
-     To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
-     to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
-     `add_cross_attention` set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass.
-     """
-
-     def __init__(self, config, add_pooling_layer=True):
-         super().__init__(config)
-         self.config = config
-
-         self.embeddings = HTransEmbeddings(config)
-         self.encoder = HTransEncoder(config)
-
-         self.pooler = HTransPooler(config) if add_pooling_layer else None
-
-         # Initialize weights and apply final processing
-         self.post_init()
-
-     def get_input_embeddings(self):
-         return self.embeddings.word_embeddings
-
-     def set_input_embeddings(self, value):
-         self.embeddings.word_embeddings = value
-
-     def _prune_heads(self, heads_to_prune):
-         """
-         Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
-         class PreTrainedModel
-         """
-         for layer, heads in heads_to_prune.items():
-             # HTransEncoder keeps its token-level layers in `sent_layer` (there is no `layer` attribute)
-             self.encoder.sent_layer[layer].attention.prune_heads(heads)
-
-     def forward(
-         self,
-         input_ids: Optional[torch.Tensor] = None,
-         attention_mask: Optional[torch.Tensor] = None,
-         token_type_ids: Optional[torch.Tensor] = None,
-         sec_mask: Optional[torch.FloatTensor] = None,
-         doc_mask: Optional[torch.FloatTensor] = None,
-         position_ids: Optional[torch.Tensor] = None,
-         head_mask: Optional[torch.Tensor] = None,
-         inputs_embeds: Optional[torch.Tensor] = None,
-         encoder_hidden_states: Optional[torch.Tensor] = None,
-         encoder_attention_mask: Optional[torch.Tensor] = None,
-         past_key_values: Optional[List[torch.FloatTensor]] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-         head_ids: Optional[torch.Tensor] = None,
-     ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
-         r"""
-         encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
-             Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
-             the model is configured as a decoder.
-         encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
-             the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
-
-             - 1 for tokens that are **not masked**,
-             - 0 for tokens that are **masked**.
-         past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
-             Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
-
-             If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
-             don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
-             `decoder_input_ids` of shape `(batch_size, sequence_length)`.
-         use_cache (`bool`, *optional*):
-             If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
-             `past_key_values`).
-         """
-         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-         output_hidden_states = (
-             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-         )
-         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-         if self.config.is_decoder:
-             use_cache = use_cache if use_cache is not None else self.config.use_cache
-         else:
-             use_cache = False
-
-         if input_ids is not None and inputs_embeds is not None:
-             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
-         elif input_ids is not None:
-             input_shape = input_ids.size()
-         elif inputs_embeds is not None:
-             input_shape = inputs_embeds.size()[:-1]
-         else:
-             raise ValueError("You have to specify either input_ids or inputs_embeds")
-
-         batch_size, seq_length = input_shape
-         device = input_ids.device if input_ids is not None else inputs_embeds.device
-
-         # past_key_values_length
-         past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
-
-         if attention_mask is None:
-             attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
-
-         if token_type_ids is None:
-             if hasattr(self.embeddings, "token_type_ids"):
-                 buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
-                 buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
-                 token_type_ids = buffered_token_type_ids_expanded
-             else:
-                 token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
-
-         # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
-         # ourselves, in which case we just need to make it broadcastable to all heads.
-         extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
-         if sec_mask is None:
-             sec_mask = torch.ones(((batch_size, self.config.max_sec_length * self.config.max_doc_length)), device=device)
-         if doc_mask is None:
-             doc_mask = torch.ones(((batch_size, self.config.max_doc_length)), device=device)
-         if self.config.max_doc_length > 1:
-             sec_mask = torch.concat([doc_mask.unsqueeze(-1), sec_mask.view((batch_size, self.config.max_doc_length, self.config.max_sec_length))], dim=-1).view((batch_size, -1))
-         else:
-             sec_mask = torch.column_stack([torch.ones((batch_size, 1)), sec_mask])
-         extended_sec_attention_mask: torch.Tensor = self.get_extended_attention_mask(sec_mask, (batch_size, self.config.max_sec_length * self.config.max_doc_length + self.config.max_doc_length))
-         extended_doc_attention_mask: torch.Tensor = self.get_extended_attention_mask(doc_mask, (batch_size, self.config.max_doc_length))
-         # If a 2D or 3D attention mask is provided for the cross-attention,
-         # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
-         if self.config.is_decoder and encoder_hidden_states is not None:
-             encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
-             encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
-             if encoder_attention_mask is None:
-                 encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
-             encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
-         else:
-             encoder_extended_attention_mask = None
-
-         # Prepare head mask if needed
-         # 1.0 in head_mask indicates we keep the head
-         # attention_probs has shape bsz x n_heads x N x N
-         # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
-         # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
-         head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
-
-         embedding_output = self.embeddings(
-             input_ids=input_ids,
1045
- position_ids=position_ids,
1046
- token_type_ids=token_type_ids,
1047
- inputs_embeds=inputs_embeds,
1048
- past_key_values_length=past_key_values_length,
1049
- )
1050
- head_embeddings = self.embeddings.word_embeddings(head_ids)
1051
- encoder_outputs = self.encoder(
1052
- embedding_output,
1053
- attention_mask=extended_attention_mask,
1054
- head_mask=head_mask,
1055
- encoder_hidden_states=encoder_hidden_states,
1056
- encoder_attention_mask=encoder_extended_attention_mask,
1057
- past_key_values=past_key_values,
1058
- use_cache=use_cache,
1059
- output_attentions=output_attentions,
1060
- output_hidden_states=output_hidden_states,
1061
- return_dict=return_dict,
1062
- sec_head_emb=head_embeddings[:, 0:1, :],
1063
- doc_head_emb=head_embeddings[:, 1:2, :],
1064
- sec_mask=extended_sec_attention_mask,
1065
- doc_mask=extended_doc_attention_mask
1066
- )
1067
- sequence_output = encoder_outputs[0]
1068
- pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1069
-
1070
- if not return_dict:
1071
- return (sequence_output, pooled_output) + encoder_outputs[1:]
1072
-
1073
- return BaseModelOutputWithPoolingAndCrossAttentions(
1074
- last_hidden_state=sequence_output,
1075
- pooler_output=pooled_output,
1076
- past_key_values=encoder_outputs.past_key_values,
1077
- hidden_states=encoder_outputs.hidden_states,
1078
- attentions=encoder_outputs.attentions,
1079
- cross_attentions=encoder_outputs.cross_attentions,
1080
- )
1081
-
1082
-
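Since this diff removes the whole model file, a small usage sketch may help readers see how the hierarchical inputs fit together. Everything below is illustrative: the config values, the head-token ids, and the shapes are assumptions, not part of the original repo.

```python
import torch

# Assumes a HTrans config object `config` is in scope.
batch_size = 2
seq_len = config.max_doc_length * config.max_sec_length    # e.g. 4 sections of 128 tokens

model = HTransModel(config)
input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_len))
attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
sec_mask = torch.ones(batch_size, seq_len)                 # 1 = real token, 0 = padding
doc_mask = torch.ones(batch_size, config.max_doc_length)   # 1 = real section, 0 = padded section
head_ids = torch.full((batch_size, 2), 101)                # hypothetical section/document head-token ids

out = model(input_ids=input_ids, attention_mask=attention_mask,
            sec_mask=sec_mask, doc_mask=doc_mask, head_ids=head_ids)
hidden = out.last_hidden_state                             # token-level hidden states
```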
class HTransForPreTraining(HTransPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias",
                                       r"cls.predictions.decoder.weight"]

    def __init__(self, config):
        super().__init__(config)

        self.bert = HTransModel(config)
        self.cls = HTransPreTrainingHeads(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        sec_mask: Optional[torch.FloatTensor] = None,
        doc_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        next_sentence_label: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        head_ids: Optional[torch.Tensor] = None,
    ) -> Union[Tuple[torch.Tensor], HTransForPreTrainingOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
            pair (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        >>> model = HTransForPreTraining.from_pretrained("path/to/htrans-checkpoint")  # local checkpoint path

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            sec_mask=sec_mask,
            doc_mask=doc_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            head_ids=head_ids,
        )

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        total_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            total_loss = masked_lm_loss

        if next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            if total_loss is not None:
                total_loss += next_sentence_loss
            else:
                total_loss = next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return HTransForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

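A minimal sketch of how the two pre-training losses combine, mirroring the `forward` above; `model`, `tokenizer`, and `inputs` are assumed to be set up as in the docstring example.

```python
import torch

labels = inputs["input_ids"].clone()
labels[labels == tokenizer.pad_token_id] = -100    # positions set to -100 are ignored by the MLM loss
next_sentence_label = torch.tensor([0])            # 0 = sequence B follows sequence A

outputs = model(**inputs, labels=labels, next_sentence_label=next_sentence_label)
outputs.loss.backward()                            # masked-LM loss + next-sentence loss, summed in forward()
```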
class HTransForSequenceClassification(HTransPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.bert = HTransModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
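A short sketch (not from the original file) of how the loss branch in `HTransForSequenceClassification.forward` is selected; `model` and tokenized inputs are assumed to be in scope.

```python
import torch

# num_labels == 1                 -> "regression"                  -> MSELoss
# integer labels, num_labels > 1  -> "single_label_classification" -> CrossEntropyLoss
# float multi-hot labels          -> "multi_label_classification"  -> BCEWithLogitsLoss
labels = torch.tensor([2, 0])    # (batch_size,) integer class ids
out = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
out.loss.backward()
```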
htrans/norms.py DELETED
@@ -1,52 +0,0 @@
import torch


def ScriptedScaleNorm(hidden_size: int, eps: float = 1e-5):
    return torch.jit.script(ScaleNorm(hidden_size, eps))


def ScriptedRMSNorm(hidden_size: int, eps: float = 1e-8):
    return torch.jit.script(RMSNorm(hidden_size, eps))


class ScaleNorm(torch.nn.Module):
    """Quick and simple scale norm implementation.

    Do we also need FixNorm (cosine in the last layer)? It's a maybe here:
    https://github.com/lucidrains/performer-pytorch/issues/55#issuecomment-762544686
    """

    def __init__(self, hidden_size: int, eps: float = 1e-5):
        super().__init__()
        self.eps = eps
        self.learnable_scale = torch.nn.Parameter(torch.tensor(float(hidden_size) ** -0.5))

    def forward(self, inputs):
        """This is the same eps clipping as in the original ScaleNorm implementation."""
        return inputs * self.learnable_scale / torch.norm(inputs, dim=-1, keepdim=True).clamp(min=self.eps)


class RMSNorm(torch.nn.Module):
    """The RMS variant of scaling norms."""

    def __init__(self, hidden_size: int, eps: float = 1e-8):
        super().__init__()
        self.eps = eps
        # Note: ones(hidden_size) ** -0.5 is still all ones, so this is a per-dimension
        # scale initialized to 1 (unlike ScaleNorm's single scalar).
        self.learnable_scale = torch.nn.Parameter(torch.ones(hidden_size) ** -0.5)

    def forward(self, inputs):
        """This is the same eps clipping as in the original ScaleNorm implementation."""
        return inputs * self.learnable_scale / torch.norm(inputs, dim=-1, keepdim=True).clamp(min=self.eps)


def get_norm_fn(norm_name):
    if norm_name == "ScaleNorm":
        norm_fn = ScriptedScaleNorm
    elif norm_name == "RMSNorm":
        norm_fn = ScriptedRMSNorm
    elif norm_name == "ApexLayerNorm":
        from apex.normalization import FusedLayerNorm

        norm_fn = FusedLayerNorm
    else:
        norm_fn = getattr(torch.nn, norm_name)
    return norm_fn
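A quick usage sketch (not part of the original file): `get_norm_fn` resolves a norm by name and falls back to `torch.nn` attributes for anything it does not special-case.

```python
import torch

rms = get_norm_fn("RMSNorm")(768)       # TorchScript-compiled RMSNorm
y = rms(torch.randn(2, 16, 768))        # normalized along the last dimension
ln = get_norm_fn("LayerNorm")(768)      # falls through to torch.nn.LayerNorm
```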
htrans/pytorch_utils.py DELETED
@@ -1,276 +0,0 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Callable, List, Optional, Set, Tuple, Union

import torch
from packaging import version
from torch import nn
import logging


ALL_LAYERNORM_LAYERS = [nn.LayerNorm]

logger = logging.getLogger(__name__)

parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version)

is_torch_less_than_1_8 = parsed_torch_version_base < version.parse("1.8.0")
is_torch_less_than_1_9 = parsed_torch_version_base < version.parse("1.9.0")
is_torch_greater_or_equal_than_1_10 = parsed_torch_version_base >= version.parse("1.10")
is_torch_less_than_1_11 = parsed_torch_version_base < version.parse("1.11")


def torch_int_div(tensor1, tensor2):
    """
    A function that performs integer division across different versions of PyTorch.
    """
    if is_torch_less_than_1_8:
        return tensor1 // tensor2
    else:
        return torch.div(tensor1, tensor2, rounding_mode="floor")


def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear:
    """
    Prune a linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (`torch.nn.Linear`): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*, defaults to 0): The dimension on which to keep the indices.

    Returns:
        `torch.nn.Linear`: The pruned layer as a new layer with `requires_grad=True`.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if layer.bias is not None:
        if dim == 1:
            b = layer.bias.clone().detach()
        else:
            b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
    return new_layer

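A hedged example (not in the original file) of pruning attention-head slices out of a projection matrix; the 12-head, 64-dims-per-head layout is illustrative.

```python
import torch
from torch import nn

proj = nn.Linear(768, 768)
head_size, heads_to_drop = 64, {3, 7}
keep = torch.tensor([i for i in range(768) if i // head_size not in heads_to_drop])
pruned = prune_linear_layer(proj, keep, dim=0)   # keep selected rows of the output dimension
print(pruned.weight.shape)                       # torch.Size([640, 768]): two heads removed
```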
class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (`int`): The number of output features.
        nx (`int`): The number of input features.
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = nn.Parameter(w)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(size_out)
        return x


def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
    """
    Prune a Conv1D layer to keep only entries in index. A Conv1D works as a Linear layer (see e.g. BERT) but the
    weights are transposed.

    Used to remove heads.

    Args:
        layer ([`~pytorch_utils.Conv1D`]): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*, defaults to 1): The dimension on which to keep the indices.

    Returns:
        [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if dim == 0:
        b = layer.bias.clone().detach()
    else:
        b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    new_layer.bias.requires_grad = False
    new_layer.bias.copy_(b.contiguous())
    new_layer.bias.requires_grad = True
    return new_layer


def prune_layer(
    layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[nn.Linear, Conv1D]:
    """
    Prune a Conv1D or linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*): The dimension on which to keep the indices.

    Returns:
        `torch.nn.Linear` or [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
    elif isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
    else:
        raise ValueError(f"Can't prune layer of class {layer.__class__}")


def apply_chunking_to_forward(
    forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
    """
    This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension
    `chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory.

    If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly
    applying `forward_fn` to `input_tensors`.

    Args:
        forward_fn (`Callable[..., torch.Tensor]`):
            The forward function of the model.
        chunk_size (`int`):
            The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`.
        chunk_dim (`int`):
            The dimension over which the `input_tensors` should be chunked.
        input_tensors (`Tuple[torch.Tensor]`):
            The input tensors of `forward_fn` which will be chunked.

    Returns:
        `torch.Tensor`: A tensor with the same shape as the `forward_fn` would have given if applied directly.


    Examples:

    ```python
    # rename the usual forward() fn to forward_chunk()
    def forward_chunk(self, hidden_states):
        hidden_states = self.decoder(hidden_states)
        return hidden_states


    # implement a chunked forward function
    def forward(self, hidden_states):
        return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
    ```"""

    assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"

    # inspect.signature exists since Python 3.5 and is a Python method -> no problem with backward compatibility
    num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
    if num_args_in_forward_chunk_fn != len(input_tensors):
        raise ValueError(
            f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
            "tensors are given"
        )

    if chunk_size > 0:
        tensor_shape = input_tensors[0].shape[chunk_dim]
        for input_tensor in input_tensors:
            if input_tensor.shape[chunk_dim] != tensor_shape:
                raise ValueError(
                    f"All input tensors have to be of the same shape: {tensor_shape}, "
                    f"found shape {input_tensor.shape[chunk_dim]}"
                )

        if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
            raise ValueError(
                f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
                f"size {chunk_size}"
            )

        num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size

        # chunk input tensor into tuples
        input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
        # apply forward fn to every tuple
        output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
        # concatenate output at same dimension
        return torch.cat(output_chunks, dim=chunk_dim)

    return forward_fn(*input_tensors)

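A hedged example (not in the original file): chunking a position-wise feed-forward call over the sequence dimension; because the function is applied position-wise, the output matches the unchunked call.

```python
import torch
from torch import nn

ffn = nn.Linear(64, 64)

def ff_chunk(hidden_states):                 # one positional arg, as the signature check expects
    return ffn(hidden_states)

hidden = torch.randn(2, 128, 64)             # (batch, seq_len, hidden)
out = apply_chunking_to_forward(ff_chunk, 32, 1, hidden)   # 4 chunks of 32 along dim 1
assert torch.allclose(out, ffn(hidden), atol=1e-6)
```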
def find_pruneable_heads_and_indices(
    heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
    """
    Finds the heads and their indices taking `already_pruned_heads` into account.

    Args:
        heads (`List[int]`): List of the indices of heads to prune.
        n_heads (`int`): The number of heads in the model.
        head_size (`int`): The size of each head.
        already_pruned_heads (`Set[int]`): A set of already pruned heads.

    Returns:
        `Tuple[Set[int], torch.LongTensor]`: A tuple with the heads to prune (excluding those already pruned) and the
        flat indices of the weight rows/columns to keep.
    """
    mask = torch.ones(n_heads, head_size)
    heads = set(heads) - already_pruned_heads  # Convert to set and remove already pruned heads
    for head in heads:
        # Compute how many pruned heads are before the head and move the index accordingly
        head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
        mask[head] = 0
    mask = mask.view(-1).contiguous().eq(1)
    index: torch.LongTensor = torch.arange(len(mask))[mask].long()
    return heads, index


def meshgrid(
    *tensors: Union[torch.Tensor, List[torch.Tensor]], indexing: Optional[str] = None
) -> Tuple[torch.Tensor, ...]:
    """
    Wrapper around torch.meshgrid to avoid warning messages about the introduced `indexing` argument.

    Reference: https://pytorch.org/docs/1.13/generated/torch.meshgrid.html
    """
    if is_torch_greater_or_equal_than_1_10:
        return torch.meshgrid(*tensors, indexing=indexing)
    else:
        if indexing != "ij":
            raise ValueError('torch.meshgrid only supports `indexing="ij"` for torch<1.10.')
        return torch.meshgrid(*tensors)
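A hedged example (not in the original file) of the index bookkeeping: pruning head 2 of a layer that currently has 4 heads of size 3, where head 0 was already pruned earlier.

```python
heads, index = find_pruneable_heads_and_indices([2], n_heads=4, head_size=3, already_pruned_heads={0})
print(heads)   # {2} (the head to prune, in the original numbering)
print(index)   # tensor([0, 1, 2, 6, 7, 8, 9, 10, 11]) (flat positions to keep;
               #  head 2 maps to row 1 because head 0 is already gone)
```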
mdcr.py DELETED
@@ -1,58 +0,0 @@
import json
import sys
from typing import Union

import logging
import os
import datasets
from tqdm import tqdm

from evaluation.embeddings_generator import EmbeddingsGenerator
from evaluation.encoders import Model
from evaluation.eval_datasets import SimpleDataset
from evaluation.evaluator import IREvaluator

logger = logging.getLogger(__name__)


class MDCREvaluator(IREvaluator):
    def __init__(self, name: str, meta_dataset: Union[str, tuple], test_dataset: Union[str, tuple], model: Model,
                 metrics: tuple = None, batch_size: int = 16, fields: list = None, key="paper_id"):
        super(MDCREvaluator, self).__init__(name, meta_dataset, test_dataset, model, metrics, SimpleDataset,
                                            batch_size, fields, key)

    def get_qc_pairs(self, dataset):
        qrpairs = dict()
        for fos_dict in dataset:
            for fos in fos_dict:
                for query in fos_dict[fos]:
                    qrpairs[query] = dict()
                    for model in fos_dict[fos][query]:
                        cands = fos_dict[fos][query][model]
                        # Candidates under the "true" key are relevant (1); all others are negatives (0).
                        qrpairs[query].update({v: 1 if model == "true" else 0 for v in cands})
        return qrpairs

    def evaluate(self, embeddings, **kwargs):
        logger.info(f"Loading test dataset from {self.test_dataset}")
        split_dataset = datasets.load_dataset("json",
                                              data_files={"test": self.test_dataset})
        logger.info(f"Loaded {len(split_dataset['test'])} test query-candidate pairs")
        if isinstance(embeddings, str) and os.path.isfile(embeddings):
            embeddings = EmbeddingsGenerator.load_embeddings_from_jsonl(embeddings)

        qrels_hard = self.get_qc_pairs(split_dataset["test"])
        preds = self.retrieval(embeddings, qrels_hard)
        results = dict()
        for q, cscores in tqdm(preds.items()):
            for c in cscores:
                results[f"{q}_{c}"] = cscores[c]
        with open("scirepeval_mdcr.json", "w") as f:
            json.dump(results, f)
        return dict()


if __name__ == "__main__":
    mname = sys.argv[1]
    model = Model(variant="default", base_checkpoint=mname)
    evaluator = MDCREvaluator("mdcr", "../mdcr/mdcr_test_data.jsonl", "../mdcr/mdcr_test.json", model, batch_size=32)
    embeddings = evaluator.generate_embeddings(save_path="mdcr_embeddings.json")
    evaluator.evaluate(embeddings)
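An illustrative sketch (not from the original repo) of the nested record layout `get_qc_pairs` expects: field of study -> query id -> model name -> candidate ids, where the "true" key marks relevant candidates. All ids below are made up.

```python
record = {
    "Medicine": {
        "q1": {
            "true": ["c1", "c2"],       # relevant candidates -> label 1
            "specter": ["c3", "c4"],    # model-retrieved candidates -> label 0
        }
    }
}
# MDCREvaluator.get_qc_pairs([record]) -> {"q1": {"c1": 1, "c2": 1, "c3": 0, "c4": 0}}
```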
requirements.txt DELETED
@@ -1,100 +0,0 @@
absl-py==1.2.0
adapter-transformers==3.0.1
aiohttp==3.8.1
aiosignal==1.2.0
antlr4-python3-runtime==4.8
async-timeout==4.0.2
attrs==22.1.0
bitarray==2.6.0
boto==2.49.0
boto3==1.24.90
botocore==1.27.90
cachetools==5.2.0
certifi==2022.6.15
cffi==1.15.1
charset-normalizer==2.1.1
click==8.1.3
colorama==0.4.5
Cython==0.29.32
datasets==2.5.2
dill==0.3.5.1
fairseq==0.12.2
filelock==3.8.0
frozenlist==1.3.1
fsspec==2022.7.1
google-auth==2.11.0
google-auth-oauthlib==0.4.6
grpcio==1.47.0
huggingface-hub==0.9.0
hydra-core==1.0.7
idna==3.3
ijson==3.1.4
importlib-metadata==4.12.0
importlib-resources==5.9.0
install==1.3.5
jmespath==1.0.1
joblib==1.1.0
lxml==4.9.1
Markdown==3.4.1
MarkupSafe==2.1.1
multidict==6.0.2
multiprocess==0.70.13
numpy==1.23.2
oauthlib==3.2.0
omegaconf==2.0.6
packaging==21.3
pandas==1.5.0
Pillow==9.2.0
pip==22.1.2
portalocker==2.5.1
protobuf==3.19.4
psycopg2-binary==2.9.4
pyarrow==9.0.0
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.21
pyDeprecate==0.3.2
pyparsing==3.0.9
python-dateutil==2.8.2
pytorch-lightning==1.7.2
pytrec-eval==0.5
pytz==2022.4
PyYAML==6.0
regex==2022.8.17
requests==2.28.1
requests-oauthlib==1.3.1
responses==0.18.0
rsa==4.9
s3transfer==0.6.0
sacrebleu==2.2.0
sacremoses==0.0.53
scikit-learn==1.1.2
scikit-multilearn==0.2.0
scipy==1.9.0
setuptools==63.4.1
six==1.16.0
sklearn==0.0
sklearn-contrib-lightning==0.6.2.post0
tabulate==0.8.10
tensorboard==2.10.0
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorboardX==2.5.1
threadpoolctl==3.1.0
tokenizers==0.12.1
torch==1.12.1
torchaudio==0.12.1
torchmetrics==0.9.3
torchvision==0.13.1
tqdm==4.64.0
transformers==4.21.1
typing_extensions==4.3.0
urllib3==1.26.12
Werkzeug==2.2.2
wheel==0.37.1
xxhash==3.0.0
yarl==1.8.1
zipp==3.8.1
openai==0.26.1
InstructorEmbedding==1.0.0
sentence_transformers==2.2.2
reviewer_matching.py DELETED
@@ -1,65 +0,0 @@
from typing import Union, Dict

import logging
import os
import datasets
import numpy as np
from tqdm import tqdm

from evaluation.embeddings_generator import EmbeddingsGenerator
from evaluation.encoders import Model
from evaluation.eval_datasets import SimpleDataset
from evaluation.evaluator import IREvaluator
from sklearn.metrics.pairwise import cosine_similarity

logger = logging.getLogger(__name__)


class ReviewerMatchingEvaluator(IREvaluator):
    def __init__(self, name: str, meta_dataset: Union[str, tuple], test_dataset: Union[str, tuple],
                 reviewer_metadata: Union[str, tuple], model: Model,
                 metrics: tuple = ("P_5", "P_10"), batch_size: int = 16, fields: list = None):
        super(ReviewerMatchingEvaluator, self).__init__(name, meta_dataset, test_dataset, model, metrics,
                                                        SimpleDataset, batch_size, fields)
        self.reviewer_metadata = reviewer_metadata

    def evaluate(self, embeddings, **kwargs):
        logger.info(f"Loading test dataset from {self.test_dataset}")
        if isinstance(self.test_dataset, str) and os.path.isdir(self.test_dataset):
            split_dataset = datasets.load_dataset("json",
                                                  data_files={"test_hard": f"{self.test_dataset}/test_hard_qrel.jsonl",
                                                              "test_soft": f"{self.test_dataset}/test_soft_qrel.jsonl"})
        else:
            split_dataset = datasets.load_dataset(self.test_dataset[0], self.test_dataset[1])
        logger.info(f"Loaded {len(split_dataset['test_hard'])} test query-candidate pairs for hard and soft tests")
        if isinstance(embeddings, str) and os.path.isfile(embeddings):
            embeddings = EmbeddingsGenerator.load_embeddings_from_jsonl(embeddings)

        qrels_hard = self.get_qc_pairs(split_dataset["test_hard"])
        qrels_soft = self.get_qc_pairs(split_dataset["test_soft"])
        preds = self.retrieval(embeddings, qrels_hard)
        results = {f"hard_{k}": v for k, v in self.calc_metrics(qrels_hard, preds).items()}
        results.update({f"soft_{k}": v for k, v in self.calc_metrics(qrels_soft, preds).items()})
        self.print_results(results)
        return results

    def retrieval(self, embeddings, qrels: Dict[str, Dict[str, int]]) -> Dict[str, Dict[str, float]]:
        logger.info("Loading reviewer metadata...")
        if isinstance(self.reviewer_metadata, str) and os.path.isdir(self.reviewer_metadata):
            reviewer_dataset = datasets.load_dataset("json", data_files={
                "metadata": f"{self.reviewer_metadata}/reviewer_metadata.jsonl"})["metadata"]
        else:
            reviewer_dataset = datasets.load_dataset(self.reviewer_metadata[0], self.reviewer_metadata[1],
                                                     split="metadata")
        logger.info(f"Loaded {len(reviewer_dataset)} reviewer metadata records")
        reviewer_papers = {d["r_id"]: d["papers"] for d in reviewer_dataset}

        run = dict()
        for qid in tqdm(qrels):
            query = np.array([embeddings[qid]])
            cand_papers = {cid: np.array([embeddings[pid] for pid in reviewer_papers[cid]]) for cid in qrels[qid] if
                           cid in reviewer_papers}
            scores = {cid: cosine_similarity(cand_papers[cid], query).flatten() for cid in cand_papers}
            sorted_scores = {cid: sorted(scores[cid], reverse=True) for cid in scores}
            # A reviewer's score is the mean similarity of their 3 most query-similar papers.
            run[qid] = {cid: float(np.mean(sorted_scores[cid][:3])) for cid in sorted_scores}
        return run
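A worked mini-example (not from the original repo) of the top-3 scoring rule in `retrieval`: a reviewer is scored by the mean similarity of their three most query-similar papers, so a single weak paper does not drag a strong reviewer down.

```python
import numpy as np

sims = sorted([0.7, 0.1, 0.9, 0.6], reverse=True)   # cosine similarities of one reviewer's papers
score = float(np.mean(sims[:3]))                    # mean of [0.9, 0.7, 0.6] = 0.7333...
```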
s2and_embeddings.py DELETED
@@ -1,56 +0,0 @@
import pickle

import numpy as np

from evaluation.encoders import Model
from evaluation.eval_datasets import SimpleDataset
from evaluation.evaluator import Evaluator
import argparse
from tqdm import tqdm

import json


def read_data(file_path):
    with open(file_path, "r") as f:
        task_data = json.load(f)
    return list(task_data.values())


class S2ANDEvaluator:

    def __init__(self, data_dir: str, model: Model, batch_size: int = 16):
        blocks = ["arnetminer", "inspire", "kisti", "pubmed", "qian", "zbmath"]
        self.data_dir = data_dir
        self.evaluators = [
            Evaluator(block, f"{data_dir}/{block}/{block}_papers.json", SimpleDataset, model, batch_size, [],
                      "paper_id", process_fn=read_data) for block in blocks]

    def generate_embeddings(self, suffix: str):
        for evaluator in tqdm(self.evaluators):
            print(evaluator.name)
            results = evaluator.generate_embeddings()
            paper_ids, embs = np.array([str(k) for k in results]), np.array(
                [results[k] for k in results])
            with open(f"{self.data_dir}/{evaluator.name}/{evaluator.name}_{suffix}.pkl", "wb") as f:
                pickle.dump((embs, paper_ids), f)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--mtype', help='Model variant to be used (default, pals, adapters, fusion)', default="default")
    parser.add_argument('--model', '-m', help='HuggingFace model to be used')
    parser.add_argument('--ctrl-tokens', action='store_true', default=False, help='use control codes for tasks')
    parser.add_argument('--adapters-dir', help='path to the adapter checkpoints', default=None)
    parser.add_argument('--adapters-chkpt', help='hf adapter names keyed on tasks', default=None, type=json.loads)
    parser.add_argument('--fusion-dir', help='path to the fusion checkpoints', default=None)
    parser.add_argument("--data-dir", help="path to the data directory")
    parser.add_argument("--suffix", help="suffix for output embedding files")

    args = parser.parse_args()
    adapters_load_from = args.adapters_dir if args.adapters_dir else args.adapters_chkpt
    model = Model(variant=args.mtype, base_checkpoint=args.model, adapters_load_from=adapters_load_from,
                  fusion_load_from=args.fusion_dir, use_ctrl_codes=args.ctrl_tokens,
                  task_id="[PRX]", all_tasks=["[CLF]", "[PRX]", "[RGN]", "[QRY]"])
    evaluator = S2ANDEvaluator(args.data_dir, model)
    evaluator.generate_embeddings(args.suffix)
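A programmatic usage sketch equivalent to the CLI above; the checkpoint name and data directory are illustrative. It writes one `(embeddings, paper_ids)` pickle per S2AND block under `<data_dir>/<block>/`.

```python
model = Model(variant="default", base_checkpoint="allenai/specter",  # any HF encoder checkpoint
              task_id="[PRX]", all_tasks=["[CLF]", "[PRX]", "[RGN]", "[QRY]"])
S2ANDEvaluator("s2and_data", model, batch_size=32).generate_embeddings(suffix="specter")
# -> s2and_data/pubmed/pubmed_specter.pkl, s2and_data/qian/qian_specter.pkl, ...
```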
scirepeval.py DELETED
@@ -1,159 +0,0 @@
import os
import argparse
import json
from typing import List, Union
from tqdm import tqdm
import pytorch_lightning as pl
from evaluation.encoders import Model, HModel
from evaluation.evaluator import IREvaluator, SupervisedEvaluator, SupervisedTask
from evaluation.few_shot_evaluator import FewShotEvaluator
from evaluation.gpt3_encoder import GPT3Model
from evaluation.instructor import InstructorModel
from reviewer_matching import ReviewerMatchingEvaluator
from evaluation.eval_datasets import SimpleDataset, IRDataset

TASK_IDS = {"classification": "[CLF]", "regression": "[RGN]", "proximity": "[PRX]",
            "adhoc_search": {"query": "[QRY]", "candidates": "[PRX]"}}

pl.seed_everything(42, workers=True)


class SciRepEval:

    def __init__(self, tasks_config: str = "super_scirep.jsonl", task_list: List[str] = None,
                 task_formats: List[str] = None, batch_size: int = 32, htrans: bool = False, document: bool = False):
        tasks_dict = dict()
        task_by_formats = dict()
        with open(tasks_config, encoding="utf-8") as f:
            for line in f:
                d = json.loads(line)
                tasks_dict[d["name"]] = d
                if d["type"] not in task_by_formats:
                    task_by_formats[d["type"]] = []
                task_by_formats[d["type"]].append(d["name"])
        if not task_list and not task_formats:
            self.tasks = tasks_dict
        elif task_list:
            self.tasks = {k: tasks_dict[k] for k in task_list}
        elif task_formats:
            self.tasks = dict()
            for task_format in task_formats:
                self.tasks.update({k: tasks_dict[k] for k in task_by_formats[task_format]})
        self.batch_size = batch_size
        # Stored on the instance so that evaluate() does not depend on the module-level CLI args.
        self.htrans = htrans
        self.document = document

    def evaluate(self, model: Union[Model, List[Model]], output: str):
        final_results = dict()
        if not isinstance(model, list):
            model = [model]
        for task_name, task in tqdm(self.tasks.items(), total=len(self.tasks)):
            for m in model:
                m.task_id = TASK_IDS[task["type"]]
            kwargs = dict()
            task_data = task["data"]
            if not task_data.get("meta"):
                raise ValueError(f"Task {task_name} has no test metadata")
            metadata = task_data["meta"]
            kwargs["meta_dataset"] = metadata if not isinstance(metadata, dict) else (
                metadata["name"], metadata["config"])

            if task_data.get("test"):
                testdata = task_data["test"]
                kwargs["test_dataset"] = testdata if not isinstance(testdata, dict) else (
                    testdata["name"], testdata["config"])
            elif isinstance(metadata, dict):
                kwargs["test_dataset"] = (metadata["name"], metadata["config"])
            else:
                raise ValueError(f"Task {task_name} has no test data")

            kwargs["metrics"] = tuple(task["metrics"])

            kwargs["batch_size"] = task["batch_size"] if "batch_size" in task else self.batch_size

            if "fields" in task:
                kwargs["fields"] = task["fields"]
            save_path, load_path = None, None
            if "embeddings" in task:
                save_path = task["embeddings"].get("save")
                load_path = task["embeddings"].get("load")
            few_shot_evaluators = []
            if task["type"] in {"classification", "regression"}:
                subtype = SupervisedTask.CLASSIFICATION if task[
                    "type"] == "classification" else SupervisedTask.REGRESSION
                if task.get("multi_label"):
                    subtype = SupervisedTask.MULTILABEL_CLASSIFICATION
                evaluator = SupervisedEvaluator(task_name, subtype, model=model,
                                                **kwargs)
                if task.get("few_shot"):
                    for run in task["few_shot"]:
                        few_shot_evaluators.append(
                            FewShotEvaluator(f"{task_name} {run['sample_size']} shot", subtype, model=model,
                                             sample_size=run["sample_size"], num_iterations=run["iterations"],
                                             **kwargs))
            else:
                if task_name == "Paper-Reviewer Matching":
                    if not task_data.get("reviewers") and not task_data.get("hf_reviewers"):
                        raise ValueError(f"Task {task_name} has no reviewer metadata locally or hf_metadata")
                    if task_data.get("reviewers"):
                        reviewers = task_data["reviewers"]
                        kwargs["reviewer_metadata"] = reviewers if not isinstance(reviewers, dict) else (
                            reviewers["name"], reviewers["config"])
                    evaluator = ReviewerMatchingEvaluator(task_name, model=model, **kwargs)
                else:
                    data_class = SimpleDataset if task_data.get("simple_format") else IRDataset
                    evaluator = IREvaluator(task_name, model=model, dataset_class=data_class, **kwargs)
            embeddings = evaluator.generate_embeddings(save_path, htrans=self.htrans,
                                                       document=self.document) if not load_path else load_path
            results = evaluator.evaluate(embeddings)
            if not few_shot_evaluators:
                final_results[task_name] = results
            else:
                final_results[task_name] = dict()
                final_results[task_name]["complete"] = results
                final_results[task_name]["few_shot"] = []

                for few_shot in few_shot_evaluators:
                    final_results[task_name]["few_shot"].append(
                        {"sample_size": few_shot.sample_size, "results": few_shot.evaluate(embeddings)})
            final_results[task_name]["task_name"] = task_name
            with open(output, "a") as f:
                json.dump(final_results[task_name], f, indent=4)
                f.write("\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--tasks-config', help='path to the task config file', default="super_scirep.jsonl")
    parser.add_argument('--mtype', help='Model variant to be used (default, pals, adapters, fusion)', default="default")
    parser.add_argument('--gpt3-model', help='Name of embedding model in case of using openai api', default=None)
    parser.add_argument('--model', '-m', help='HuggingFace model to be used')
    parser.add_argument('--max_len', default=512, type=int)
    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
    parser.add_argument('--ctrl-tokens', action='store_true', default=False, help='use control codes for tasks')
    parser.add_argument('--adapters-dir', help='path to the adapter checkpoints', default=None)
    parser.add_argument('--fusion-dir', help='path to the fusion checkpoints', default=None)
    parser.add_argument('--adapters-chkpt', help='hf adapter names keyed on tasks', default=None, type=json.loads)
    parser.add_argument('--output', help="path to the output file", default="scirepeval_results.json")
    parser.add_argument('--fp16', action='store_true', default=False, help='use floating point 16 precision')
    parser.add_argument('--htrans', action='store_true', default=False, help='use hierarchical model')
    parser.add_argument('--instructor', action='store_true', default=False, help='use an instructor model for eval')
    parser.add_argument('--document', action='store_true', default=False)

    args = parser.parse_args()
    adapters_load_from = args.adapters_dir if args.adapters_dir else args.adapters_chkpt
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.gpt3_model:
        model = GPT3Model(embed_model=args.gpt3_model)
    elif args.instructor:
        model = InstructorModel(args.model)
    elif args.htrans:
        model = HModel(variant=args.mtype, base_checkpoint=args.model, adapters_load_from=adapters_load_from,
                       fusion_load_from=args.fusion_dir,
                       use_ctrl_codes=args.ctrl_tokens,
                       task_id="", all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"], use_fp16=args.fp16)
    else:
        model = Model(variant=args.mtype, base_checkpoint=args.model, adapters_load_from=adapters_load_from,
                      fusion_load_from=args.fusion_dir,
                      use_ctrl_codes=args.ctrl_tokens,
                      task_id="", all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"], use_fp16=args.fp16,
                      document=args.document)
    evaluator = SciRepEval(tasks_config=args.tasks_config, batch_size=args.batch_size, htrans=args.htrans,
                           document=args.document)
    evaluator.evaluate(model, args.output)
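A minimal library-style usage sketch (the checkpoint and output path are illustrative): restrict the benchmark to a couple of tasks from the config and write their results to a file.

```python
model = Model(variant="default", base_checkpoint="allenai/specter",
              task_id="", all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"])
benchmark = SciRepEval(tasks_config="super_scirep.jsonl", task_list=["Feeds-1", "Citation Count"])
benchmark.evaluate(model, output="subset_results.json")
```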
scirepeval_tasks.jsonl DELETED
@@ -1,22 +0,0 @@
{"name":"Biomimicry","type":"classification","data":{"meta":{"name":"allenai/scirepeval","config":"biomimicry"},"test":{"name":"allenai/scirepeval_test","config":"biomimicry"}},"metrics":["f1"],"few_shot":[{"sample_size":64,"iterations":50},{"sample_size":16,"iterations":100}]}
{"name":"DRSM","type":"classification","data":{"meta":{"name":"allenai/scirepeval","config":"drsm"},"test":{"name":"allenai/scirepeval_test","config":"drsm"}},"metrics":["f1_macro"],"few_shot":[{"sample_size":64,"iterations":50},{"sample_size":24,"iterations":100}]}
{"name":"Feeds-1","type":"proximity","data":{"meta":{"name":"allenai/scirepeval","config":"feeds_1"},"test":{"name":"allenai/scirepeval_test","config":"feeds_1"}},"metrics":["map"]}
{"name":"Feeds-M","type":"proximity","data":{"meta":{"name":"allenai/scirepeval","config":"feeds_m"},"test":{"name":"allenai/scirepeval_test","config":"feeds_m"}},"metrics":["map"]}
{"name":"Feeds Title","type":"adhoc_search","data":{"meta":{"name":"allenai/scirepeval","config":"feeds_title"},"test":{"name":"allenai/scirepeval_test","config":"feeds_title"}},"metrics":["map"]}
{"name":"TREC-CoVID","type":"adhoc_search","data":{"meta":{"name":"allenai/scirepeval","config":"trec_covid"},"test":{"name":"allenai/scirepeval_test","config":"trec_covid"}},"metrics":["ndcg"]}
{"name":"Peer Review Score","type":"regression","data":{"meta":{"name":"allenai/scirepeval","config":"peer_review_score_hIndex"},"test":{"name":"allenai/scirepeval_test","config":"peer_review_score"}},"embeddings":{"save":"embeddings/peer_review_score_hIndex.jsonl"},"metrics":["kendalltau"]}
{"name":"Max hIndex","type":"regression","data":{"meta":{"name":"allenai/scirepeval","config":"peer_review_score_hIndex"},"test":{"name":"allenai/scirepeval_test","config":"hIndex"}},"embeddings":{"load":"embeddings/peer_review_score_hIndex.jsonl"},"metrics":["kendalltau"]}
{"name":"Tweet Mentions","type":"regression","data":{"meta":{"name":"allenai/scirepeval","config":"tweet_mentions"},"test":{"name":"allenai/scirepeval_test","config":"tweet_mentions"}},"metrics":["kendalltau"]}
{"name":"SciDocs MAG","type":"classification","data":{"meta":{"name":"allenai/scirepeval","config":"scidocs_mag_mesh"},"test":{"name":"allenai/scirepeval_test","config":"scidocs_mag"}},"embeddings":{"save":"embeddings/scidocs_mag_mesh.jsonl"},"metrics":["f1_macro"]}
{"name":"SciDocs MeSH","type":"classification","data":{"meta":{"name":"allenai/scirepeval","config":"scidocs_mag_mesh"},"test":{"name":"allenai/scirepeval_test","config":"scidocs_mesh"}},"embeddings":{"load":"embeddings/scidocs_mag_mesh.jsonl"},"metrics":["f1_macro"]}
{"name":"SciDocs Cite","type":"proximity","data":{"simple_format":true, "meta":{"name":"allenai/scirepeval","config":"scidocs_view_cite_read"},"test":{"name":"allenai/scirepeval_test","config":"scidocs_cite"}},"embeddings":{"save":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
{"name":"SciDocs CoView","type":"proximity","data":{"simple_format":true, "meta":{"name":"allenai/scirepeval","config":"scidocs_view_cite_read"},"test":{"name":"allenai/scirepeval_test","config":"scidocs_view"}},"embeddings":{"load":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
{"name":"SciDocs CoCite","type":"proximity","data":{"simple_format":true, "meta":{"name":"allenai/scirepeval","config":"scidocs_view_cite_read"},"test":{"name":"allenai/scirepeval_test","config":"scidocs_cocite"}},"embeddings":{"load":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
{"name":"SciDocs CoRead","type":"proximity","data":{"simple_format":true, "meta":{"name":"allenai/scirepeval","config":"scidocs_view_cite_read"},"test":{"name":"allenai/scirepeval_test","config":"scidocs_read"}},"embeddings":{"load":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
{"name":"Same Author Detection","type":"proximity","data":{"meta":{"name":"allenai/scirepeval","config":"same_author"},"test":{"name":"allenai/scirepeval_test","config":"same_author"}},"metrics":["map"]}
{"name":"Highly Influential Citations","type":"proximity","data":{"meta":{"name":"allenai/scirepeval","config":"high_influence_cite"},"test":{"name":"allenai/scirepeval_test","config":"high_influence_cite"}},"metrics":["map"]}
{"name":"Search","type":"adhoc_search","data":{"meta":{"name":"allenai/scirepeval","config":"search"},"test":{"name":"allenai/scirepeval_test","config":"search"}},"fields":["title","abstract","venue","year"],"metrics":["ndcg"]}
{"name":"Citation Count","type":"regression","data":{"meta":{"name":"allenai/scirepeval","config":"cite_count"},"test":{"name":"allenai/scirepeval_test","config":"cite_count"}},"metrics":["kendalltau"]}
{"name":"Publication Year","type":"regression","data":{"meta":{"name":"allenai/scirepeval","config":"pub_year"},"test":{"name":"allenai/scirepeval_test","config":"pub_year"}},"metrics":["kendalltau"]}
{"name":"Fields of study","type":"classification","data":{"meta":{"name":"allenai/scirepeval","config":"fos"},"test":{"name":"allenai/scirepeval_test","config":"fos"}},"metrics":["f1_macro"],"few_shot":[{"sample_size":10,"iterations":50},{"sample_size":5,"iterations":100}],"multi_label":true}
{"name":"MeSH","type":"classification","data":{"meta":{"name":"allenai/scirepeval","config":"mesh_descriptors"},"test":{"name":"allenai/scirepeval_test","config":"mesh_descriptors"}},"metrics":["f1_macro"]}
super_scirep.jsonl DELETED
@@ -1,16 +0,0 @@
1
- {"name":"Feeds-1","type":"proximity","data":{"meta":{"name":"howey/super_scirep","config":"feeds_1"},"test":{"name":"howey/super_scirep_test","config":"feeds_1"}},"metrics":["map"]}
2
- {"name":"Feeds-M","type":"proximity","data":{"meta":{"name":"howey/super_scirep","config":"feeds_m"},"test":{"name":"howey/super_scirep_test","config":"feeds_m"}},"metrics":["map"]}
3
- {"name":"Highly Influential Citations","type":"proximity","data":{"meta":{"name":"howey/super_scirep","config":"high_influence_cite"},"test":{"name":"howey/super_scirep_test","config":"high_influence_cite"}},"metrics":["map"]}
4
- {"name":"SciDocs Cite","type":"proximity","data":{"simple_format":true, "meta":{"name":"howey/super_scirep","config":"scidocs_view_cite_read"},"test":{"name":"howey/super_scirep_test","config":"scidocs_cite"}},"embeddings":{"save":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
5
- {"name":"SciDocs CoCite","type":"proximity","data":{"simple_format":true, "meta":{"name":"howey/super_scirep","config":"scidocs_view_cite_read"},"test":{"name":"howey/super_scirep_test","config":"scidocs_cocite"}},"embeddings":{"load":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
6
- {"name":"Fields of study","type":"classification","data":{"meta":{"name":"howey/super_scirep","config":"fos"},"test":{"name":"howey/super_scirep_test","config":"fos"}},"metrics":["f1_macro"],"few_shot":[{"sample_size":10,"iterations":50},{"sample_size":5,"iterations":100}],"multi_label":true}
7
- {"name":"Publication Year","type":"regression","data":{"meta":{"name":"howey/super_scirep","config":"pub_year"},"test":{"name":"howey/super_scirep_test","config":"pub_year"}},"metrics":["kendalltau"]}
8
- {"name":"Search","type":"adhoc_search","data":{"meta":{"name":"howey/super_scirep","config":"search"},"test":{"name":"howey/super_scirep_test","config":"search"}},"fields":["title","abstract","venue","year"],"metrics":["ndcg"]}
9
- {"name":"Feeds Title","type":"adhoc_search","data":{"meta":{"name":"howey/super_scirep","config":"feeds_title"},"test":{"name":"howey/super_scirep_test","config":"feeds_title"}},"metrics":["map"]}
10
- {"name":"Paper-Reviewer Matching","type":"proximity","data":{"meta":{"name":"howey/super_scirep","config":"paper_reviewer_matching"},"test":{"name":"howey/super_scirep_test","config":"paper_reviewer_matching"},"reviewers":{"name":"howey/super_scirep_test","config":"reviewers"}},"metrics":["P_5", "P_10"]}
11
- {"name":"SciDocs CoView","type":"proximity","data":{"simple_format":true, "meta":{"name":"howey/super_scirep","config":"scidocs_view_cite_read"},"test":{"name":"howey/super_scirep_test","config":"scidocs_view"}},"embeddings":{"load":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
12
- {"name":"SciDocs CoRead","type":"proximity","data":{"simple_format":true, "meta":{"name":"howey/super_scirep","config":"scidocs_view_cite_read"},"test":{"name":"howey/super_scirep_test","config":"scidocs_read"}},"embeddings":{"load":"embeddings/scidocs_view_cite_read.jsonl"},"metrics":["map","ndcg"]}
13
- {"name":"Peer Review Score","type":"regression","data":{"meta":{"name":"howey/super_scirep","config":"peer_review_score_hIndex"},"test":{"name":"howey/super_scirep_test","config":"peer_review_score"}},"embeddings":{"save":"embeddings/peer_review_score_hIndex.jsonl"},"metrics":["kendalltau"]}
14
- {"name":"Max hIndex","type":"regression","data":{"meta":{"name":"howey/super_scirep","config":"peer_review_score_hIndex"},"test":{"name":"howey/super_scirep_test","config":"hIndex"}},"embeddings":{"load":"embeddings/peer_review_score_hIndex.jsonl"},"metrics":["kendalltau"]}
15
- {"name":"Tweet Mentions","type":"regression","data":{"meta":{"name":"howey/super_scirep","config":"tweet_mentions"},"test":{"name":"howey/super_scirep_test","config":"tweet_mentions"}},"metrics":["kendalltau"]}
16
- {"name":"Citation Count","type":"regression","data":{"meta":{"name":"howey/super_scirep","config":"cite_count"},"test":{"name":"howey/super_scirep_test","config":"cite_count"}},"metrics":["kendalltau"]}
 
training/TRAINING.md DELETED
@@ -1,138 +0,0 @@
1
- ## Training
2
- The code in this sub-directory can be used to train a general-purpose multi-task model or the multi-format models introduced in [SciRepEval](https://arxiv.org/abs/2211.13308).
3
-
4
- After the quick setup step in the README, you can choose to train any of the following base models:
5
- (Parentheses denote how they are referred to in the paper.)
6
- 1. General multi-task model (MTL CLS) - the \[CLS\] token embedding is used as the document representation
7
- 2. Multi-task training w/ Control Codes (MTL CTRL) - control codes are prepended to the input, and their embedding is used as the document representation (see the sketch below)
8
- 3. [BERT PALs](https://github.com/AsaCooperStickland/Bert-n-Pals) (PALs) - task-specific modules
9
- 4. [Adapters and Fusion](https://github.com/adapter-hub/adapter-transformers) - task-specific adapters
10
-
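For intuition, the MTL CTRL variant prepends a task-specific special token to the input text and reads the document representation off that token's position instead of \[CLS\]. A minimal sketch with a HuggingFace tokenizer and model (the `[CLF]` control code mirrors the example config in Step 1 below):

```python
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("allenai/specter")
model = AutoModel.from_pretrained("allenai/specter")

# register the control code as a special token and grow the embedding table
tokenizer.add_special_tokens({"additional_special_tokens": ["[CLF]"]})
model.resize_token_embeddings(len(tokenizer))

text = "[CLF] " + "Some paper title [SEP] and its abstract ..."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
# position 0 is [CLS]; position 1 holds the prepended control code
doc_rep = model(**inputs).last_hidden_state[:, 1, :]
```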
11
- #### Step 1
12
- ```bash
13
- cd ${ROOT}/training
14
- ```
15
-
16
- Define the tasks and associated metadata as a list in a json config file. Refer to [sample_data/tasks_config.json](https://github.com/allenai/scirepeval/blob/main/training/sample_data/tasks_config.json) for the SciRepEval training config**.
17
-
18
- ** The example config below assumes the training data has been downloaded locally and uses the `"data_files"` property. To use HuggingFace datasets instead, follow the notes after the example config and use the `"dataset"` property.
19
-
20
- *Example config:*
21
- ```json
22
- [
23
- {
24
- "name": "fos",
25
- "type": "classification",
26
- "multi_label": true,
27
- "data_files":
28
- {
29
- "train": "<scirepeval_data_dir>/train/fos/train.jsonl",
30
- "dev": "<scirepeval_data_dir>/train/fos/val.jsonl"
31
- },
32
- "labels": "sample_data/fos_labels.txt",
33
- "labels_field": "labels_text",
34
- "ctrl_token": "[CLF]",
35
- "sample_size":
36
- {
37
- "train": 600000,
38
- "dev": 40000
39
- }
40
- }
41
- ]
42
-
43
- ```
44
- **Note**
45
-
46
- - `"type"` can be one of `["classification", "regression", "ir", "triplet"]`.
47
- - `"classification"` is suitable for tasks with categorical (discrete) labels,;`"regression"` for tasks with continuous labels; `"ir"` for retrieval tasks formatted as `{"query": X, "candidates": [{}]}` and `"triplet"` for contrastive learning tasks formatted as `{"query": q, "pos": p, "neg": n}`.
48
- - For multi label classification, add `"multi_label": true` as in the above example.
49
- - By default the pre-processing code expects "title" and "abstract" in every example. To process specific fields, provide additional property as `"input_fields": ["title", "abstract", "venue", "year"]`.
50
- - For models apart from MTL CLS, provide the `"ctrl_token"` associated with each task, for MTL CTRL it works as the special control code and for PALs and Adapters it acts as the task id to determine the module to be used in the forward pass.
51
- - Some "ir" tasks like ad-hoc search \[SRCH\] might require different control codes forthe query and candidates which can be provided as `"ctrl_token": {"query": "[QRY]", "candidates": "[PRX]"}`. For PALs and Adapters, this task id is internally resolved to feed the queries and candidates to their relevant modules.
52
- - `"sample_size"` is not required if all the samples are to be processed for the splits.
53
- - If loading data from Huggingface datsets, instead of `"data_files"`, you can provide parameters for `load_dataset` method as - `"dataset": {"path": <hf dataset name>, "name": <optional config name for dataset with multiple configs>}`.
54
- - ``if "type"=="regresion": <provide the "labels_field"> elif "type" =="classification": <provide the "labels" and "labels_field"> ``
55
- - Losses associated with each task type:
56
-
57
- |Type|Loss |
58
- |--|--|
59
- | Classification |Cross Entropy |
60
- |Multi-label Classification|Binary Cross Entropy|
61
- |Regression|Mean Squared Error|
62
- |IR/Triplet|Triplet or Contrastive Loss|
63
-
64
-
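For reference, here is what a task entry might look like when pulling data from HuggingFace datasets via the `"dataset"` property instead of local `"data_files"`, combined with the query/candidate control-code form described above (the dataset path and sample sizes are illustrative):

```json
[
  {
    "name": "search",
    "type": "ir",
    "dataset": {"path": "allenai/scirepeval", "name": "search"},
    "input_fields": ["title", "abstract", "venue", "year"],
    "ctrl_token": {"query": "[QRY]", "candidates": "[PRX]"},
    "sample_size": {"train": 100000, "dev": 10000}
  }
]
```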
65
- #### Step 2
66
- To run the training script with default params, run one of the following commands depending on the type of model you want to train:
67
- **MTL CLS**
68
- ```bash
69
- python pl_training.py --gpu 2 --tasks-config sample_data/tasks_config.json <base model name/chkpoint path> <expt name>
70
- ```
71
-
72
- **MTL CTRL**
73
- ```bash
74
- python pl_training.py --gpu 2 --ctrl-tokens --tasks-config sample_data/tasks_config.json <base model name/chkpoint path> <expt name>
75
- ```
76
-
77
- **PALs**
78
-
79
- Requires a PALs config file for additional model configuration; sample files are provided under the `bert_pals_config` directory.
80
- ```bash
81
- python pl_training.py --gpu 2 --pals-config pals.config.json --tasks-config sample_data/tasks_config.json <base model name/chkpoint path> <expt name>
82
- ```
83
- **Adapters**
84
- ```bash
85
- python pl_training.py --gpu 2 --adapter-type single --tasks-config sample_data/tasks_config.json <base model name/chkpoint path> <expt name>
86
- ```
87
- **Fusion**
88
-
89
- ```bash
- python pl_training.py --gpu 2 --adapter-type fusion --tasks-config sample_data/tasks_config.json <base model name/chkpoint path> <expt name>
- ```
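Since fusion composes adapters that already exist, the single-task adapters are typically trained (or downloaded) first and passed in via the `--adapters-chkpt` flag exposed by `pl_training.py`. A hypothetical invocation; the adapter ids/paths are placeholders, and keying the dictionary by control token is an assumption based on how task ids are resolved internally:

```bash
python pl_training.py --gpu 2 --adapter-type fusion \
  --adapters-chkpt '{"[CLF]": "<adapter path or hub id>", "[PRX]": "<adapter path or hub id>"}' \
  --tasks-config sample_data/tasks_config.json <base model name/chkpoint path> <expt name>
```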
90
-
91
- ### Additional Parameters
92
-
93
- ```
- positional arguments:
94
-
95
- model HuggingFace model to be used
96
-
97
- version experiment version
98
-
99
-
100
-
101
- optional arguments:
102
-
103
- -h, --help show this help message and exit
104
-
105
- --tasks-config TASKS_CONFIG path to the task config file
106
-
107
- --tokenizer TOKENIZER HuggingFace tokenizer to be used (same as model name if not supplied)
108
-
109
- --output OUTPUT dir to save checkpoints and finetuned model
110
-
111
- --pals-config PALS_CONFIG name of config file for PALS architecture
112
-
113
- --adapter-type ADAPTER_TYPE type of adapter architecture (single/fusion)
114
-
115
- --batch-size BATCH_SIZE batch size
116
-
117
- --lr LR initial learning rate
118
-
119
- --peak-lr PEAK_LR peak learning rate
120
-
121
- --warmup WARMUP number of warmup steps
122
-
123
- --epochs EPOCHS number of epochs
124
-
125
- --grad-accum GRAD_ACCUM grad accumulation steps
126
-
127
- --ctrl-tokens use control codes for tasks
128
-
129
- --gpu GPU number of gpus
130
-
131
- --max-len MAX_LEN max sequence length
132
-
133
- --val-check-interval VAL_CHECK_INTERVAL validation loop interval
134
-
135
- --checkpoint CHECKPOINT resume from checkpoint path
136
- ```
137
-
138
- TensorBoard logs and checkpoints are written to the `<output>/full_run/<version>` directory, by default `./lightning_logs/full_run/<expt name>`.
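Putting it together, a finished run should leave behind roughly the following layout (a sketch based on the checkpoint and tokenizer saving logic in `pl_training.py`; exact checkpoint filenames depend on the epoch and validation loss values):

```
<output>/full_run/<version>/
├── checkpoints/
│   ├── ep-*.ckpt           # top-4 checkpoints by avg_val_loss
│   ├── tokenizer/          # tokenizer + vocab saved on checkpoint
│   └── model/              # encoder weights (plus adapters for adapter runs)
└── events.out.tfevents.*   # TensorBoard event files
```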
 
training/bert_pals_config/low_rank_config.json DELETED
@@ -1,15 +0,0 @@
1
- {
2
- "hidden_size_aug": 100,
3
- "mult": true,
4
- "attention_probs_dropout_prob": 0.1,
5
- "hidden_act": "gelu",
6
- "hidden_dropout_prob": 0.1,
7
- "hidden_size": 768,
8
- "initializer_range": 0.02,
9
- "intermediate_size": 3072,
10
- "max_position_embeddings": 512,
11
- "num_attention_heads": 12,
12
- "num_hidden_layers": 12,
13
- "type_vocab_size": 2,
14
- "vocab_size": 30522
15
- }
 
training/bert_pals_config/pals.config.json DELETED
@@ -1,16 +0,0 @@
1
- {
2
- "hidden_size_aug": 204,
3
- "mult": true,
4
- "pals": true,
5
- "attention_probs_dropout_prob": 0.1,
6
- "hidden_act": "gelu",
7
- "hidden_dropout_prob": 0.1,
8
- "hidden_size": 768,
9
- "initializer_range": 0.02,
10
- "intermediate_size": 3072,
11
- "max_position_embeddings": 512,
12
- "num_attention_heads": 12,
13
- "num_hidden_layers": 12,
14
- "type_vocab_size": 2,
15
- "vocab_size": 31116
16
- }
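For intuition, `hidden_size_aug` in these configs is the width of the low-rank, task-specific path that PALs runs in parallel with each shared transformer layer. A heavily simplified sketch of the shape of that computation; the real BERT-PALs module applies multi-head attention inside the low-rank space, which is omitted here:

```python
import torch
import torch.nn as nn

class PalsLikePath(nn.Module):
    """Simplified PALs-style parallel path: project down to hidden_size_aug,
    transform, project back up, and add to the shared layer's output."""

    def __init__(self, hidden_size: int = 768, hidden_size_aug: int = 204, num_tasks: int = 4):
        super().__init__()
        self.down = nn.ModuleList([nn.Linear(hidden_size, hidden_size_aug) for _ in range(num_tasks)])
        self.up = nn.ModuleList([nn.Linear(hidden_size_aug, hidden_size) for _ in range(num_tasks)])
        self.act = nn.GELU()

    def forward(self, hidden_states: torch.Tensor, shared_out: torch.Tensor, task_id: int) -> torch.Tensor:
        # only the task_id-indexed low-rank path is active for a given batch
        aug = self.up[task_id](self.act(self.down[task_id](hidden_states)))
        return shared_out + aug
```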
 
training/mtl_datasets.py DELETED
@@ -1,311 +0,0 @@
1
- import decimal
2
- from typing import Iterator, Tuple, List, Dict, Union, Any, Iterable
3
- import torch
4
- from torch.utils.data import IterableDataset, DataLoader, ChainDataset, get_worker_info
5
- from torch.utils.data.dataset import T_co, Dataset
6
- from transformers import PreTrainedTokenizer, BatchEncoding, AutoTokenizer
7
- import datasets
8
- import numpy as np
9
- from sklearn.model_selection import train_test_split
10
- from sklearn.preprocessing import MultiLabelBinarizer
11
- from skmultilearn.model_selection import IterativeStratification
12
- from abc import ABC, abstractmethod
13
- import itertools
14
- from torch.utils.data._utils.collate import default_collate
15
- from collections import defaultdict
16
- from strategies import BatchingStrategy
17
- import random
18
-
19
- datasets.logging.set_verbosity_error()
20
-
21
-
22
- class AbstractMultiTaskDataset(ABC, IterableDataset):
23
- def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
24
- fields: List[str],
25
- sample_size, ctrl_token: str, max_len: int):
26
- self.task_name = task_name
27
- self.data = data
28
- self.tokenizer = tokenizer
29
- self.fields = fields
30
- self.sample_size = sample_size
31
- self.ctrl_token = ctrl_token
32
- self.max_len = max_len
33
- self._effective_sample_size = sample_size
34
-
35
- def sub_sample(self, json_parse: Iterator[Dict]) -> Iterator:
36
- curr_len = 0
37
- try:
38
- for _ in range(self.effective_sample_size):
39
- curr_len += 1
40
- yield next(json_parse)
41
- except StopIteration:
42
- print(
43
- f"Reqd sample size {self.effective_sample_size} greater than {self.task_name} dataset size {curr_len}, using complete dataset")
44
-
45
- @abstractmethod
46
- def preprocess(self, line: Dict[str, str]) -> Union[
47
- Tuple[str, BatchEncoding, torch.Tensor], List[Tuple[str, List[BatchEncoding]]]]:
48
- pass
49
-
50
- def postprocess_iter(self, curr_iter):
51
- return curr_iter
52
-
53
- @property
54
- def effective_sample_size(self):
55
- return self._effective_sample_size
56
-
57
- @effective_sample_size.setter
58
- def effective_sample_size(self, val):
59
- self._effective_sample_size = val
60
-
61
- def __iter__(self) -> Iterator[T_co]:
62
- json_parse = iter(self.data)
63
- if self.sample_size == -1:
64
- map_itr = map(self.preprocess, json_parse)
65
- else:
66
- map_itr = map(self.preprocess, self.sub_sample(json_parse))
67
- return self.postprocess_iter(map_itr)
68
-
69
- def tokenized_input(self, input_data: Union[Dict[str, str], str], ctrl_token_key: str = None) -> BatchEncoding:
70
- text = []
71
- if type(input_data) == dict:
72
- for field in self.fields:
73
- if input_data[field]:
74
- if type(input_data[field]) in set([decimal.Decimal, float]):
75
- input_data[field] = str(int(input_data[field]))
76
- text.append(input_data[field])
77
- text = (f" {self.tokenizer.sep_token} ".join(text)).strip()
78
- else:
79
- text = input_data
80
- if self.ctrl_token:
81
- ctrl_token = self.ctrl_token if not ctrl_token_key else self.ctrl_token[ctrl_token_key]
82
- text = ctrl_token + " " + text
83
- input_ids = self.tokenizer(text, padding="max_length", truncation=True, return_tensors="pt",
84
- max_length=self.max_len)
85
- # if self.ctrl_token:
86
- # input_ids["input_ids"] = input_ids["input_ids"][:,1:]
87
- # input_ids["attention_mask"] = input_ids["attention_mask"][:,1:]
88
- return {"input_ids": input_ids["input_ids"].flatten(), "attention_mask": input_ids["attention_mask"].flatten()}
89
-
90
-
91
- class ClassificationDataset(AbstractMultiTaskDataset):
92
- def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
93
- fields: List[str],
94
- label_field: str, labels: Dict[str, int], sample_size=-1, ctrl_token: str = None, max_len: int = 512):
95
- super().__init__(task_name, data, tokenizer, fields, sample_size, ctrl_token, max_len)
96
- self.labels = labels
97
- self.label_field = label_field
98
-
99
- def label_transform(self, label_raw: str) -> Union[int, np.ndarray]:
100
- return self.labels[label_raw]
101
-
102
- def preprocess(self, line: Dict[str, str]) -> Tuple[str, BatchEncoding, int]:
103
- # Splits the line into text and label and applies preprocessing to the text
104
- label = line[self.label_field]
105
- input_ids = self.tokenized_input(line)
106
- return self.task_name, input_ids, self.label_transform(label)
107
-
108
- def sub_sample(self, json_parse: Iterator[Dict]) -> Iterator:
109
- # json_itr_list = itertools.tee(json_parse, 2)
110
- # json_parse = json_itr_list[0]
111
- X, y = zip(*[(d, self.labels[d[self.label_field]]) for d in json_parse])
112
- X, y = np.array(X), np.array(y)
113
- if X.shape[0] < self.effective_sample_size:
114
- print(
115
- f"Reqd sample size {self.effective_sample_size} greater than {self.task_name} dataset size {X.shape[0]}, using complete dataset")
116
- X_sub = X
117
- else:
118
- X_sub, _, _, _ = train_test_split(X, y, train_size=self.effective_sample_size, random_state=42, stratify=y)
119
- for d in X_sub:
120
- yield d
121
-
122
-
123
- class MultiLabelClassificationDataset(ClassificationDataset):
124
- def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
125
- fields: List[str],
126
- label_field: str, labels: Dict[str, int], sample_size=-1, ctrl_token: str = None, max_len: int = 512):
127
- super().__init__(task_name, data, tokenizer, fields, label_field, labels, sample_size, ctrl_token, max_len)
128
- self.labels = dict(sorted(labels.items()))
129
- self.mlb = MultiLabelBinarizer()
130
- self.mlb.fit([list(self.labels.keys())])
131
-
132
- def label_transform(self, label_raw: List[str]) -> Union[int, np.ndarray]:
133
- return self.mlb.transform([label_raw]).flatten().astype(float)
134
-
135
- def sub_sample(self, json_parse: Iterator[Dict]) -> Iterator:
136
- X, y = zip(*[(d, tuple(d[self.label_field])) for d in json_parse])
137
- X, y = np.array(X), self.mlb.transform(y)
138
- if X.shape[0] < self.effective_sample_size:
139
- print(
140
- f"Reqd sample size {self.effective_sample_size} greater than {self.task_name} dataset size {X.shape[0]}, using complete dataset")
141
- X_sub = X
142
- else:
143
- sub_sample_ratio = self.effective_sample_size / X.shape[0]
144
- stratifier = IterativeStratification(n_splits=2, order=1,
145
- sample_distribution_per_fold=[sub_sample_ratio,
146
- 1 - sub_sample_ratio, ])
147
- _, indices = next(stratifier.split(X, y))
148
- X_sub = X[indices]
149
- for d in X_sub:
150
- yield d
151
-
152
-
153
- class IRDataset(AbstractMultiTaskDataset):
154
- def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
155
- fields: List[str],
156
- sample_size=-1, ctrl_token: str = None, max_len: int = 512):
157
- super().__init__(task_name, data, tokenizer, fields, sample_size, ctrl_token, max_len)
158
- self.effective_sample_size //= 5
159
-
160
- def preprocess(self, line: Dict[str, str]) -> List[Tuple[str, List[BatchEncoding]]]:
161
- # Splits the line into text and label and applies preprocessing to the text
162
- query, candidates = line["query"], line["candidates"]
163
- pos_candidates, neg_candidates = [c for c in candidates if c["score"]], [c for c in candidates if
164
- not c["score"]]
165
- num_trips = min(5, len(neg_candidates))
166
- new_pos_candidates = pos_candidates.copy()
167
- if pos_candidates:
168
- pos_candidates = itertools.cycle(pos_candidates)
169
- while len(new_pos_candidates) < num_trips:
170
- new_pos_candidates.append(next(pos_candidates))
171
- query_ctrl_key, cand_ctrl_key = None, None
172
- if type(self.ctrl_token) == dict:
173
- query_ctrl_key = "query"
174
- cand_ctrl_key = "candidates"
175
- tokenized_query = self.tokenized_input(query, query_ctrl_key)
176
-
177
- for pos in new_pos_candidates[:num_trips]:
178
- neg = neg_candidates.pop()
179
- tokenized_pos = self.tokenized_input(pos, cand_ctrl_key)
180
- tokenized_neg = self.tokenized_input(neg, cand_ctrl_key)
181
- yield (self.task_name, [tokenized_query, tokenized_pos, tokenized_neg])
182
-
183
- def postprocess_iter(self, curr_iter):
184
- # chained_iter = itertools.chain(*curr_iter)
185
- batched_list = []
186
- try:
187
- while True:
188
- while len(batched_list) < 1000:
189
- batched_list += next(curr_iter)
190
- random.shuffle(batched_list)
191
- for x in batched_list:
192
- yield x
193
- batched_list.clear()
194
- except StopIteration:
195
- random.shuffle(batched_list)
196
- for x in batched_list:
197
- yield x
198
-
199
-
200
- class TripletDataset(AbstractMultiTaskDataset):
201
- def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
202
- fields: List[str],
203
- sample_size=-1, ctrl_token: str = None, max_len: int = 512):
204
- super().__init__(task_name, data, tokenizer, fields, sample_size, ctrl_token, max_len)
205
-
206
- def preprocess(self, line: Dict[str, str]) -> Union[
207
- Tuple[str, BatchEncoding, torch.Tensor], List[Tuple[str, List[BatchEncoding]]]]:
208
- triplet = []
209
- for key in ("query", "pos", "neg"):
210
- triplet.append(self.tokenized_input(line[key]))
211
- return self.task_name, triplet
212
-
213
-
214
- class CustomChainDataset(ChainDataset):
215
- def __init__(self, datasets: Iterable[Dataset], batch_size, device_rank=0, num_devices=1,
216
- batching_strategy=BatchingStrategy.SEQUENTIAL):
217
- super().__init__(datasets)
218
- self.batch_size = batch_size
219
- self.batching = batching_strategy
220
- self.device_rank = device_rank
221
- self.num_devices = num_devices
222
- self.effective_batch_size = batch_size * num_devices
223
-
224
- def iter_slice(self, curr_iter, worker_info):
225
- curr_batch, idx = dict(), 0
226
- try:
227
- while True:
228
- for _ in range(self.effective_batch_size):
229
- curr_batch[idx] = next(curr_iter)
230
- idx += 1
231
- for i, x in curr_batch.items():
232
- if (i // self.batch_size) % self.num_devices == self.device_rank:
233
- if (i // self.effective_batch_size) % worker_info.num_workers == worker_info.id:
234
- yield x
235
- curr_batch.clear()
236
- except StopIteration:
237
- curr_batch.clear()
238
-
239
- def __iter__(self):
240
- batch_itr = self.batching.value.get_batch_iter(self.datasets, self.effective_batch_size)
241
- worker_info = get_worker_info()
242
- if worker_info:
243
- batch_itr = self.iter_slice(batch_itr, worker_info)
244
-
245
- return batch_itr
246
-
247
-
248
- class RegressionDataset(AbstractMultiTaskDataset):
249
- def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
250
- fields: List[str],
251
- label_field: str, sample_size=-1, ctrl_token: str = None, max_len: int = 512):
252
- super().__init__(task_name, data, tokenizer, fields, sample_size, ctrl_token, max_len)
253
- self.label_field = label_field
254
-
255
- def preprocess(self, line: Dict[str, str]) -> Tuple[str, Dict[str, BatchEncoding], Union[int, float]]:
256
- # Splits the line into text and label and applies preprocessing to the text
257
- label = np.float32(line[self.label_field])
258
- input_ids = self.tokenized_input(line)
259
- return self.task_name, input_ids, label
260
-
261
-
262
- def multi_collate(batch: List[Any]) -> Dict[str, List[Any]]:
263
- task_sub_batch = defaultdict(list)
264
- for b in batch:
265
- task_sub_batch[b[0]].append(b[1:])
266
- return {task: default_collate(sub_batch) for task, sub_batch in task_sub_batch.items()}
267
-
268
-
269
- if __name__ == '__main__':
270
- tokenizer = AutoTokenizer.from_pretrained("allenai/specter")
271
- tokenizer.add_special_tokens({'additional_special_tokens': ["[CLF]"]})
272
- with open("sample_data/mesh_descriptors.txt", "r") as f:
273
- labels = f.readlines()
274
- labels = {l.strip(): i for i, l in enumerate(labels)}
275
- cls_dataset = ClassificationDataset(task_name="mesh", data=
276
- datasets.load_dataset("json", data_files="../../scidocs/data/mesh_plus/train.json", streaming=True)["train"],
277
- tokenizer=tokenizer,
278
- fields=["title", "abstract"],
279
- label_field="descriptor", labels=labels, sample_size=400000)
280
- trip_dataset = IRDataset(task_name="s2and", data=
281
- datasets.load_dataset("json", data_files="sample_data/s2and_small.json", streaming=True)["train"],
282
- tokenizer=tokenizer,
283
- fields=["title", "abstract"], sample_size=400000)
284
- specter_dataset = TripletDataset(task_name="specter", data=
285
- datasets.load_dataset("json", data_files="../../scidocs/data/specter_triplets/train.json", streaming=True)["train"],
286
- tokenizer=tokenizer,
287
- fields=["title", "abstract"], sample_size=400000)
288
- search_dataset = IRDataset(task_name="search", data=
289
- datasets.load_dataset("json", data_files="sample_data/search_small.jsonl", streaming=True)["train"],
290
- tokenizer=tokenizer,
291
- fields=["title", "abstract", "venue", "year"], sample_size=100)
292
- with open("sample_data/fos_labels.txt", "r") as f:
293
- mlc_labels = f.readlines()
294
- mlc_labels = {l.strip(): i for i, l in enumerate(mlc_labels)}
295
-
296
- ml_cls_dataset = MultiLabelClassificationDataset(task_name="fos", data=
- datasets.load_dataset("json", data_files="sample_data/fos_small.json", streaming=True)["train"],
297
- tokenizer=tokenizer,
298
- fields=["title", "abstract"],
299
- label_field="labels_text", labels=mlc_labels, sample_size=100,
300
- ctrl_token="[CLF]")
301
-
302
- batch_size = 16
303
- multi_dataset = CustomChainDataset([ml_cls_dataset], batch_size=batch_size,
304
- batching_strategy=BatchingStrategy.MIXED_PROPORTIONAL)
305
- dataloader = DataLoader(multi_dataset, batch_size=batch_size, collate_fn=multi_collate, num_workers=8)
306
- for i, data in enumerate(dataloader):
307
- print(i)
308
- for task, batch in data.items():
309
- d = batch[-1][-1] if task in ("s2and", "specter", "search") else batch[-1]
310
- print(task, d.shape[0])
311
- print(batch)
 
training/pl_training.py DELETED
@@ -1,325 +0,0 @@
1
- import json
2
- import sys
3
-
4
- # setting path
5
- sys.path.append('../')
6
-
7
- import argparse
8
- from typing import Dict, Optional, Any
9
- import datasets
10
- import pytorch_lightning as pl
11
- import torch
12
- import torch.nn
13
- from pytorch_lightning.callbacks import ModelCheckpoint
14
- from pytorch_lightning.loggers import TensorBoardLogger
15
- from pytorch_lightning.utilities.distributed import rank_zero_only
16
- from pytorch_lightning.utilities.distributed import sync_ddp_if_available
17
- from pytorch_lightning.utilities.types import TRAIN_DATALOADERS, EVAL_DATALOADERS, STEP_OUTPUT
18
- from torch.distributed import ReduceOp
19
- from torch.utils.data import DataLoader
20
- from transformers import AdamW, get_linear_schedule_with_warmup
21
- from transformers import AutoTokenizer, AutoModel, AutoConfig
22
-
23
- from adapter_fusion import AdapterFactory
24
- from bert_pals import BertPalsEncoder
25
- from mtl_datasets import ClassificationDataset, multi_collate, MultiLabelClassificationDataset, IRDataset, \
26
- CustomChainDataset, TripletDataset, RegressionDataset
27
- from schedulers import InverseSquareRootSchedule, InverseSquareRootScheduleConfig
28
- from strategies import BatchingStrategy
29
- from tasks import TaskFamily, load_tasks
30
-
31
- pl.seed_everything(42, workers=True)
32
-
33
-
34
- def init_weights(modules):
35
- for module in modules:
36
- module.linear.weight.data.normal_(mean=0.0, std=0.02)
37
- if module.linear.bias is not None:
38
- module.linear.bias.data.zero_()
39
-
40
-
41
- pl_to_split_map = {"fit": "train", "validate": "dev", "test": "test", "predict": "test"}
42
-
43
-
44
- class SciRepTrain(pl.LightningModule):
45
- def __init__(self, batch_size: int, init_lr: float, peak_lr: float, tokenizer: str, model: str, warmup_steps: int,
46
- log_dir: str,
47
- use_ctrl_tokens=False,
48
- task_dict: Dict[str, TaskFamily] = None,
49
- pals_cfg: str = None, adapter_type: str = None, max_len: int = 512, load_adapters_as=None):
50
- super().__init__()
51
- self.task_dict = load_tasks() if not task_dict else task_dict
52
- print(self.task_dict.keys())
53
- self.heads = torch.nn.ModuleDict(
54
- {t.name: t.head for t in self.task_dict.values() if t.head}
55
- )
56
- self.init_loss = None
57
- self.task_idx = {t: i for i, t in enumerate(self.task_dict)}
58
- self.loss_wt = torch.ones(len(self.task_dict)).float()
59
- init_weights(self.heads.values())
60
- self.warmup_steps = warmup_steps
61
- self.multi_train = None
62
- self.multi_test = None
63
- self.multi_val = None
64
- self.pals = pals_cfg is not None
65
- self.adapters = adapter_type is not None
66
- self.use_ctrl_tokens = use_ctrl_tokens
67
- spl_ctrl_tokens = set()
68
- for t in self.task_dict.values():
69
- if type(t.ctrl_token) == str:
70
- spl_ctrl_tokens.add(t.ctrl_token)
71
- else:
72
- spl_ctrl_tokens.update(t.ctrl_token.values())
73
- spl_ctrl_tokens = sorted(list(spl_ctrl_tokens))
74
- task_ids = spl_ctrl_tokens
75
- self.tokenizer = AutoTokenizer.from_pretrained(tokenizer)
76
-
77
- if self.adapters:
78
- adapters_dir = f'{log_dir}/model/adapters/' if not load_adapters_as else load_adapters_as
79
- try:
80
- adapters_dir = json.loads(adapters_dir)
81
- except:
82
- pass
83
- self.encoder = AdapterFactory.get_adapter(model, task_ids,
84
- adapter_type == "fusion", adapters_dir)
85
- else:
86
- self.encoder = AutoModel.from_pretrained(model)
87
- if self.pals:
88
- self.encoder = BertPalsEncoder(f"bert_pals_config/{pals_cfg}", task_ids, self.encoder)
89
- if self.use_ctrl_tokens:
90
- print("Using Control Tokens", spl_ctrl_tokens)
91
- special_tokens_dict = {'additional_special_tokens': spl_ctrl_tokens}
92
- num_added_toks = self.tokenizer.add_special_tokens(special_tokens_dict)
93
- self.encoder.resize_token_embeddings(len(self.tokenizer))
94
- self.batch_size = batch_size
95
- self.init_lr = init_lr
96
- self.peak_lr = peak_lr
97
- self.max_len = max_len
98
- self.save_hyperparameters(ignore=["task_dict"])
99
-
100
- def forward(self, input_ids, attention_mask=None, token_idx=0, task_id=None):
101
- if not self.pals:
102
- embedding = self.encoder(input_ids, attention_mask=attention_mask) if not self.adapters else self.encoder(
103
- input_ids,
104
- attention_mask=attention_mask,
105
- task_id=task_id)
106
- return embedding.last_hidden_state[:, token_idx, :]
107
- else:
108
- embedding = self.encoder(input_ids, attention_mask=attention_mask, task_id=task_id)
109
- return embedding[:, token_idx, :]
110
-
111
- def configure_optimizers(self):
112
- """Prepare optimizer and schedule (linear warmup and decay)"""
113
- no_decay = ["bias", "LayerNorm.weight"]
114
- optimizer_grouped_parameters = [
115
- {
116
- "params": [p for n, p in self.named_parameters() if
117
- p.requires_grad and not any(nd in n for nd in no_decay)],
118
- "weight_decay": 0.0,
119
- },
120
- {
121
- "params": [p for n, p in self.named_parameters() if
122
- p.requires_grad and any(nd in n for nd in no_decay)],
123
- "weight_decay": 0.0,
124
- }
125
- ]
126
- optimizer = AdamW(
127
- optimizer_grouped_parameters, lr=self.init_lr, eps=1e-8
128
- )
129
-
130
- self.opt = optimizer
131
- if self.pals or self.adapters:
132
- scheduler = get_linear_schedule_with_warmup(optimizer, self.warmup_steps, 77500)
133
- else:
134
- scheduler_config = InverseSquareRootScheduleConfig(warmup_updates=self.warmup_steps,
135
- warmup_init_lr=self.init_lr,
136
- lr=self.peak_lr)
137
- scheduler = InverseSquareRootSchedule(scheduler_config, optimizer)
138
-
139
- return {
140
- "optimizer": optimizer,
141
- "lr_scheduler": {
142
- "scheduler": scheduler,
143
- "interval": "step",
144
- "frequency": 1}
145
- }
146
-
147
- def calc_loss(self, train_batch, batch_idx):
148
- losses, loss_per_task = [], torch.zeros(len(self.task_dict)).cuda()
149
- scl = torch.tensor(0.0)
150
- for name, batch in train_batch.items():
151
- task = self.task_dict[name]
152
- idx = 0 if not self.use_ctrl_tokens else 1
153
- task_id = task.ctrl_token
154
- if task.type not in set(["classification", "regression"]):
155
- query, pos, neg = batch[0][0], batch[0][1], batch[0][2]
156
- query_ctrl = cand_ctrl = task_id
157
- if type(task_id) == dict:
158
- query_ctrl = task_id["query"]
159
- cand_ctrl = task_id["candidates"]
160
- query_emb, pos_emb, neg_emb = self(query['input_ids'], query['attention_mask'], idx, query_ctrl), self(
161
- pos['input_ids'], pos['attention_mask'], idx, cand_ctrl), self(neg['input_ids'],
162
- neg['attention_mask'], idx,
163
- cand_ctrl)
164
- curr_loss = task.loss(query_emb, pos_emb, neg_emb)
165
- else:
166
- x, y = batch[0], batch[1]
167
- encoding = self(x['input_ids'], x['attention_mask'], idx, task_id)
168
- logits = self.heads[name](encoding)
169
- if task.type == "regression":
170
- logits = logits.squeeze()
171
- curr_loss = task.loss(logits, y)
172
- if task.multi_label:
173
- curr_loss = torch.mean(curr_loss, dim=1)
174
- elif task.contrastive_loss:
175
- scl = task.contrastive_loss(encoding, y, self.heads[name].num_labels)
176
- curr_loss = 0.1 * curr_loss + 0.9 * scl
177
- loss_per_task[self.task_idx[name]] = torch.mean(curr_loss)
178
- return loss_per_task
179
-
180
- def training_step(self, train_batch, batch_idx):
181
- loss_per_task = self.calc_loss(train_batch, batch_idx)
182
- loss = torch.sum(loss_per_task)
183
- self.log("train_loss", loss, prog_bar=True, on_step=True, on_epoch=True, batch_size=self.batch_size)
184
- self.log("lr", self.lr_schedulers().get_last_lr()[-1], on_step=True, on_epoch=False, prog_bar=True, logger=True)
185
- return {"loss": loss}
186
-
187
- def validation_step(self, train_batch, batch_idx) -> Optional[STEP_OUTPUT]:
188
- loss_per_task = self.calc_loss(train_batch, batch_idx)
189
- # loss_per_task = torch.mul(self.loss_wt.cuda(), loss_per_task)
190
- loss = torch.sum(loss_per_task)
191
- dist_loss_per_task = loss_per_task.clone().data
192
- dist_loss_per_task = sync_ddp_if_available(dist_loss_per_task, reduce_op=ReduceOp.SUM)
193
- for task in self.task_dict:
194
- self.log(f"val_loss_{task}", dist_loss_per_task[self.task_idx[task]], on_step=True, on_epoch=True,
195
- prog_bar=False,
196
- batch_size=self.batch_size, rank_zero_only=True)
197
- self.log("val_loss", loss, on_step=True, on_epoch=False, prog_bar=True)
198
- self.log("avg_val_loss", loss, on_epoch=True, prog_bar=True, sync_dist=True, batch_size=self.batch_size)
199
- return {"val_loss": loss}
200
-
201
- def load_data(self, split) -> CustomChainDataset:
202
- hf_split = "validation" if split == "dev" else "train"
203
- dataset_list = []
204
- task_dataset_map = {"classification": ClassificationDataset, "regression": RegressionDataset, "ir": IRDataset}
205
- for t_name, task in self.task_dict.items():
206
- data_file = {hf_split: task.data_files[split]} if task.data_files else None
207
- dataset_name = (task.dataset, hf_split)
208
- data_src = data_file if data_file else dataset_name
209
- op_token = task.ctrl_token if self.use_ctrl_tokens else None
210
- if type(data_src) == dict:
211
- data = datasets.load_dataset("json", data_files=data_src, streaming=True)[
212
- next(iter(data_src.keys()))]
213
- else:
214
- data = datasets.load_dataset(**data_src[0], split=data_src[1], streaming=True)
215
- kwargs = {"data": data, "ctrl_token": op_token, "max_len": self.max_len, "task_name": t_name,
216
- "tokenizer": self.tokenizer, "fields": task.input_fields,
217
- "sample_size": task.sample_size[split] if type(task.sample_size) == dict else task.sample_size}
218
-
219
- if task.type == "classification":
220
- kwargs.update({"label_field": task.labels_field, "labels": task.labels})
221
- elif task.type == "regression":
222
- kwargs.update({"label_field": task.labels_field})
223
- if task.multi_label:
224
- dataset_list.append(MultiLabelClassificationDataset(**kwargs))
225
- else:
226
- dataset_list.append(task_dataset_map.get(task.type, TripletDataset)(**kwargs))
227
- multi_dataset = CustomChainDataset(dataset_list, batch_size=self.batch_size,
228
- device_rank=self.trainer.global_rank, num_devices=self.trainer.world_size,
229
- batching_strategy=BatchingStrategy.MIXED_PROPORTIONAL)
230
- if split == "train":
231
- self.multi_train = multi_dataset
232
- elif split == "dev":
233
- self.multi_val = multi_dataset
234
-
235
- def setup(self, stage: Optional[str] = None) -> None:
236
- self.load_data("train")
237
-
238
- def train_dataloader(self) -> TRAIN_DATALOADERS:
239
- return DataLoader(self.multi_train, batch_size=self.batch_size, collate_fn=multi_collate, num_workers=8,
240
- pin_memory=True)
241
-
242
- def val_dataloader(self) -> EVAL_DATALOADERS:
243
- self.load_data("dev")
244
- return DataLoader(self.multi_val, batch_size=self.batch_size, collate_fn=multi_collate, num_workers=8)
245
-
246
- @rank_zero_only
247
- def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
248
- try:
249
- logger = self.logger
250
- log_dir = f'{logger.save_dir}/{logger.name}/{logger.version}/checkpoints'
251
- self.tokenizer.save_pretrained(f'{log_dir}/tokenizer/')
252
- self.tokenizer.save_vocabulary(f'{log_dir}/tokenizer/')
253
- self.encoder.save_pretrained(f'{log_dir}/model')
254
- except:
255
- print("Exception encountered while saving, try agin from checkpoint")
256
-
257
-
258
- if __name__ == '__main__':
259
- torch.multiprocessing.set_sharing_strategy('file_system')
260
- parser = argparse.ArgumentParser()
261
- parser.add_argument('--tasks-config', help='path to the task config file', default="sample_data/tasks_config.json")
262
- parser.add_argument('model', help='HuggingFace model to be used')
263
- parser.add_argument('--tokenizer', help='HuggingFace tokenizer to be used (same as model name if not supplied)',
264
- default=None)
265
- parser.add_argument('--output', help='dir to save checkpoints and finetuned model', default="./lightning_logs/")
266
- parser.add_argument('version', help='experiment version')
267
- parser.add_argument('--pals-config', default=None, help='path to config file for PALS architecture')
268
- parser.add_argument('--adapter-type', default=None, help='type of adapter architecture (single/fusion)')
269
- parser.add_argument('--adapters-chkpt', default=None,
270
- help='Adapters to be loaded either from a directory path or a dictionary of pretrained huggingface adapters with id')
271
- parser.add_argument('--batch-size', type=int, default=16, help='batch size')
272
- parser.add_argument('--lr', type=float, default=1e-4, help='initial learning rate')
273
- parser.add_argument('--peak-lr', type=float, default=5e-5, help='peak learning rate')
274
- parser.add_argument('--warmup', type=int, default=700, help='number of warmup steps')
275
- parser.add_argument('--epochs', type=int, default=2, help='number of epochs')
276
- parser.add_argument('--grad-accum', type=int, default=8, help='grad accumulation steps')
277
- parser.add_argument('--ctrl-tokens', action='store_true', default=False, help='use control codes for tasks')
278
- parser.add_argument('--gpu', type=int, default=None, help='number of gpus')
279
- parser.add_argument('--max-len', type=int, default=512, help='max sequence length')
280
- parser.add_argument('--val-check-interval', type=float, default=1.0, help='validation loop interval')
281
- parser.add_argument('--checkpoint', default=None, help='resume from checkpoint path')
282
-
283
- args = parser.parse_args()
284
- mconfig = AutoConfig.from_pretrained(args.model)
285
- tasks_dict = load_tasks(args.tasks_config, mconfig.hidden_size)
286
- log_dir = args.output
287
- logger = TensorBoardLogger(
288
- save_dir=log_dir,
289
- version=args.version,
290
- name='full_run',
291
- )
292
-
293
- # second part of the path shouldn't be f-string
294
- filepath = f'{log_dir}/{logger.name}/{logger.version}/checkpoints/'
295
- checkpoint_callback = ModelCheckpoint(
296
- dirpath=filepath,
297
- filename='ep-{epoch}_avg_val_loss-{avg_val_loss:.3f}',
298
- save_top_k=4,
299
- verbose=True,
300
- monitor='avg_val_loss', # monitors metrics logged by self.log.
301
- mode='min'
302
- )
303
-
304
- model = SciRepTrain(batch_size=args.batch_size, init_lr=args.lr,
305
- peak_lr=args.peak_lr,
306
- tokenizer=args.tokenizer if args.tokenizer else args.model,
307
- model=args.model,
308
- warmup_steps=args.warmup,
309
- use_ctrl_tokens=args.ctrl_tokens, task_dict=tasks_dict, pals_cfg=args.pals_config,
310
- adapter_type=args.adapter_type, log_dir=filepath, max_len=args.max_len,
311
- load_adapters_as=args.adapters_chkpt)
312
-
313
- hparams = {"gpus": args.gpu, "val_check_interval": args.val_check_interval, "num_sanity_val_steps": 4,
314
- "max_epochs": args.epochs,
315
- "accumulate_grad_batches": args.grad_accum, "resume_from_checkpoint": args.checkpoint}
316
-
317
- trainer = pl.Trainer(logger=logger,
318
- strategy="ddp" if hparams["gpus"] > 1 else None,
319
- enable_checkpointing=True,
320
- callbacks=[checkpoint_callback],
321
- precision=16,
322
- **hparams)
323
- logger.log_hyperparams(hparams)
324
- logger.log_hyperparams({"tasks": {k: str(v) for k, v in tasks_dict.items()}})
325
- trainer.fit(model)
 
training/sample_data/fos_labels.txt DELETED
@@ -1,23 +0,0 @@
1
- Agricultural and Food sciences
2
- Art
3
- Biology
4
- Business
5
- Chemistry
6
- Computer science
7
- Economics
8
- Education
9
- Engineering
10
- Environmental science
11
- Geography
12
- Geology
13
- History
14
- Law
15
- Linguistics
16
- Materials science
17
- Mathematics
18
- Medicine
19
- Philosophy
20
- Physics
21
- Political science
22
- Psychology
23
- Sociology
 
training/sample_data/fos_small.json DELETED
The diff for this file is too large to render. See raw diff
 
training/sample_data/mesh_descriptors.txt DELETED
@@ -1,30 +0,0 @@
1
- Brain
2
- Neoplasms
3
- Breast Neoplasms
4
- Liver
5
- Anti-Bacterial Agents
6
- Neurons
7
- Antineoplastic Agents
8
- HIV Infections
9
- DNA
10
- Proteins
11
- Calcium
12
- Hypertension
13
- Postoperative Complications
14
- Escherichia coli
15
- Lung Neoplasms
16
- Bacterial Proteins
17
- Aging
18
- Obesity
19
- Kidney
20
- Myocardial Infarction
21
- Diabetes Mellitus, Type 2
22
- Lung
23
- Liver Neoplasms
24
- Mental Disorders
25
- Asthma
26
- Prostatic Neoplasms
27
- Skin Neoplasms
28
- Cardiovascular Diseases
29
- Carcinoma, Squamous Cell
30
- Adenocarcinoma
 
training/sample_data/mesh_small.json DELETED
The diff for this file is too large to render. See raw diff
 
training/sample_data/s2and_small.json DELETED
The diff for this file is too large to render. See raw diff
 
training/sample_data/search_small.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
training/sample_data/specter_small.json DELETED
The diff for this file is too large to render. See raw diff