BartekSadlej committed
Commit bc9e587
1 Parent(s): 47cb090
Upload model

Files changed:
- README.md +199 -0
- config.json +57 -0
- config.py +31 -0
- model.py +265 -0
- model.safetensors +3 -0
README.md
ADDED
@@ -0,0 +1,199 @@
---
library_name: transformers
tags: []
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]
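Since `config.json` maps `AutoConfig`/`AutoModel` to the custom `ILKTConfig`/`ILKTModel` classes shipped in this repository, loading requires `trust_remote_code=True`. A minimal sketch, assuming the repository id below is replaced with the actual one and that the backbone tokenizer (`google-bert/bert-base-multilingual-cased`) is appropriate:

```python
import torch
from transformers import AutoModel, AutoTokenizer

repo_id = "BartekSadlej/ILKT-model"  # placeholder: substitute the real repository id

# trust_remote_code pulls in config.py / model.py from the repository.
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-multilingual-cased")

inputs = tokenizer(["Przykładowe zdanie."], return_tensors="pt")
with torch.no_grad():
    # The default forward routing returns sentence embeddings in pooler_output.
    embeddings = model(**inputs).pooler_output
print(embeddings.shape)  # expected: torch.Size([1, 768])
```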
## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
config.json
ADDED
@@ -0,0 +1,57 @@
{
  "architectures": [
    "ILKTModel"
  ],
  "auto_map": {
    "AutoConfig": "config.ILKTConfig",
    "AutoModel": "model.ILKTModel"
  },
  "backbone_config": {
    "pretrained_model_name_or_path": "google-bert/bert-base-multilingual-cased",
    "torch_dtype": "bfloat16",
    "trust_remote_code": true
  },
  "cls_head_config": {
    "dropout": 0.0,
    "n_dense": 1,
    "pool_type": "cls",
    "use_batch_norm": true,
    "use_layer_norm": false
  },
  "cls_heads": [
    [
      3,
      "allegro--klej-cdsc-e"
    ],
    [
      2,
      "allegro--klej-psc"
    ],
    [
      2,
      "allegro--klej-dyk"
    ],
    [
      5,
      "PL-MTEB--scifield"
    ]
  ],
  "embedding_head_config": {
    "dropout": 0.0,
    "n_dense": 1,
    "normalize_embeddings": false,
    "pool_type": "cls",
    "use_batch_norm": false,
    "use_layer_norm": false
  },
  "hidden_size": 768,
  "mlm_head_config": {
    "dropout": 0.0,
    "n_dense": 1,
    "use_batch_norm": false,
    "use_layer_norm": true
  },
  "model_type": "ILKT",
  "torch_dtype": "float32",
  "transformers_version": "4.41.2"
}
config.py
ADDED
@@ -0,0 +1,31 @@
from typing import Any, Dict, List, Tuple

from transformers import PretrainedConfig


class ILKTConfig(PretrainedConfig):

    model_type = "ILKT"

    def __init__(
        self,
        backbone_config: Dict[str, Any] = {},
        embedding_head_config: Dict[str, Any] = {},
        mlm_head_config: Dict[str, Any] = {},
        cls_head_config: Dict[str, Any] = {},
        cls_heads: List[Tuple[int, str]] = [],
        max_length: int = 512,
        **kwargs
    ):
        self.backbone_config = backbone_config
        self.embedding_head_config = embedding_head_config
        self.mlm_head_config = mlm_head_config
        self.cls_head_config = cls_head_config
        self.cls_heads = cls_heads
        self.max_length = max_length
        self.output_hidden_states = False

        # TODO: make this a fully-fledged HF config (persist max_length etc.);
        # it is not yet clear how this is best handled in the HF ecosystem.

        super().__init__(**kwargs)
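For orientation, a hedged sketch of how an `ILKTConfig` equivalent to the uploaded `config.json` could be built by hand (in practice `AutoConfig.from_pretrained(..., trust_remote_code=True)` does this for you; all values below are copied from `config.json`, and the import assumes `config.py` from this repository is on the path):

```python
from config import ILKTConfig  # assumes config.py from this repository is importable

cfg = ILKTConfig(
    backbone_config={
        "pretrained_model_name_or_path": "google-bert/bert-base-multilingual-cased",
        "torch_dtype": "bfloat16",
        "trust_remote_code": True,
    },
    embedding_head_config={
        "pool_type": "cls", "n_dense": 1, "dropout": 0.0,
        "use_batch_norm": False, "use_layer_norm": False,
        "normalize_embeddings": False,
    },
    mlm_head_config={
        "n_dense": 1, "dropout": 0.0,
        "use_batch_norm": False, "use_layer_norm": True,
    },
    cls_head_config={
        "pool_type": "cls", "n_dense": 1, "dropout": 0.0,
        "use_batch_norm": True, "use_layer_norm": False,
    },
    # (num_classes, head_name) pairs: one classification head per task.
    cls_heads=[
        (3, "allegro--klej-cdsc-e"),
        (2, "allegro--klej-psc"),
        (2, "allegro--klej-dyk"),
        (5, "PL-MTEB--scifield"),
    ],
    max_length=512,
)
```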
model.py
ADDED
@@ -0,0 +1,265 @@
from enum import Enum
from typing import Any, Dict, Optional

import torch
import torch.nn as nn
from transformers import AutoConfig, AutoModel, PreTrainedModel
from transformers.modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    MaskedLMOutput,
    SequenceClassifierOutput,
)

from .config import ILKTConfig


def cls_pooling(last_hidden_state, attention_mask):
    return last_hidden_state[:, 0, :]


def create_head_blocks(
    hidden_size: int,
    n_dense: int,
    use_batch_norm: bool,
    use_layer_norm: bool,
    dropout: float,
    **kwargs,
) -> nn.Module:
    blocks = []
    for _ in range(n_dense):
        blocks.append(nn.Linear(hidden_size, hidden_size))
        if use_batch_norm:
            blocks.append(nn.BatchNorm1d(hidden_size))
        elif use_layer_norm:
            blocks.append(nn.LayerNorm(hidden_size))
        blocks.append(nn.ReLU())
        if dropout > 0:
            blocks.append(nn.Dropout(dropout))
    return nn.Sequential(*blocks)


class SentenceEmbeddingHead(nn.Module):
    def __init__(
        self, backbone_hidden_size: int, embedding_head_config: Dict[str, Any]
    ):
        super().__init__()
        self.config = embedding_head_config

        self.head = nn.Sequential(
            *[
                create_head_blocks(backbone_hidden_size, **embedding_head_config),
            ]
        )

    def forward(
        self, backbone_output: BaseModelOutput, attention_mask: torch.Tensor, **kwargs
    ) -> BaseModelOutputWithPooling:
        if self.config["pool_type"] == "cls":
            embeddings = cls_pooling(backbone_output.last_hidden_state, attention_mask)
        else:
            raise NotImplementedError(
                f"Pooling type {self.config['pool_type']} not implemented"
            )
        embeddings = self.head(embeddings)
        if self.config["normalize_embeddings"]:
            embeddings = nn.functional.normalize(embeddings, p=2, dim=-1)
        return BaseModelOutputWithPooling(
            last_hidden_state=backbone_output.last_hidden_state,
            pooler_output=embeddings,  # type: ignore
        )


class MLMHead(nn.Module):
    def __init__(
        self,
        backbone_hidden_size: int,
        vocab_size: int,
        mlm_head_config: Dict[str, Any],
    ):
        super().__init__()
        self.config = mlm_head_config

        self.head = nn.Sequential(
            *[
                create_head_blocks(backbone_hidden_size, **mlm_head_config),
                nn.Linear(backbone_hidden_size, vocab_size),
            ]
        )

    def forward(
        self,
        backbone_output: BaseModelOutput,
        attention_mask: torch.Tensor,
        labels: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> MaskedLMOutput:
        prediction_scores = self.head(backbone_output.last_hidden_state)

        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                prediction_scores.view(-1, prediction_scores.size(-1)),
                labels.view(-1),
            )
        return MaskedLMOutput(loss=loss, logits=prediction_scores)


class CLSHead(nn.Module):
    def __init__(
        self,
        backbone_hidden_size: int,
        n_classes: int,
        cls_head_config: Dict[str, Any],
    ):
        super().__init__()
        self.config = cls_head_config

        self.head = nn.Sequential(
            *[
                create_head_blocks(backbone_hidden_size, **cls_head_config),
                nn.Linear(backbone_hidden_size, n_classes),
            ]
        )

    def forward(
        self,
        backbone_output: BaseModelOutput,
        attention_mask: torch.Tensor,
        labels: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> SequenceClassifierOutput:
        if self.config["pool_type"] == "cls":
            embeddings = cls_pooling(backbone_output.last_hidden_state, attention_mask)
        else:
            raise NotImplementedError(
                f"Pooling type {self.config['pool_type']} not implemented"
            )

        prediction_scores = self.head(embeddings)

        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                prediction_scores.view(-1, prediction_scores.size(-1)),
                labels.view(-1),
            )
        return SequenceClassifierOutput(loss=loss, logits=prediction_scores)


class ForwardRouting(Enum):
    GET_SENTENCE_EMBEDDING = "get_sentence_embedding"
    GET_MLM_OUTPUT = "get_mlm_output"
    GET_CLS_OUTPUT = "get_cls_output"


class ILKTModel(PreTrainedModel):
    config_class = ILKTConfig

    def __init__(self, config: ILKTConfig):
        super().__init__(config)

        backbone_config = AutoConfig.from_pretrained(**config.backbone_config)
        pretrained_model_name_or_path = config.backbone_config[
            "pretrained_model_name_or_path"
        ]
        self.backbone = AutoModel.from_pretrained(
            pretrained_model_name_or_path, config=backbone_config
        )

        backbone_hidden_size = backbone_config.hidden_size
        self.config.hidden_size = backbone_hidden_size
        backbone_vocab_size = backbone_config.vocab_size
        self.embedding_head = SentenceEmbeddingHead(
            backbone_hidden_size, config.embedding_head_config
        )
        self.mlm_head = MLMHead(
            backbone_hidden_size, backbone_vocab_size, config.mlm_head_config
        )

        self.cls_heads = nn.ModuleDict(
            dict(
                [
                    (
                        name,
                        CLSHead(
                            backbone_hidden_size, n_classes, config.cls_head_config
                        ),
                    )
                    for n_classes, name in config.cls_heads
                ]
            )
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        token_type_ids: Optional[torch.Tensor] = None,
        forward_routing: ForwardRouting = ForwardRouting.GET_SENTENCE_EMBEDDING,
        **kwargs,
    ):
        if forward_routing == ForwardRouting.GET_SENTENCE_EMBEDDING:
            return self.get_sentence_embedding(
                input_ids, attention_mask, token_type_ids=token_type_ids
            )
        elif forward_routing == ForwardRouting.GET_MLM_OUTPUT:
            return self.get_mlm_output(
                input_ids, attention_mask, token_type_ids=token_type_ids, **kwargs
            )
        elif forward_routing == ForwardRouting.GET_CLS_OUTPUT:
            return self.get_cls_output(
                input_ids, attention_mask, token_type_ids=token_type_ids, **kwargs
            )
        else:
            raise ValueError(f"Unknown forward routing {forward_routing}")

    def get_sentence_embedding(
        self, input_ids: torch.Tensor, attention_mask: torch.Tensor, **kwargs
    ):
        backbone_output: BaseModelOutput = self.backbone(
            input_ids=input_ids, attention_mask=attention_mask, **kwargs
        )

        embedding_output = self.embedding_head(
            backbone_output, attention_mask, **kwargs
        )

        return embedding_output

    def get_mlm_output(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        labels: Optional[torch.Tensor] = None,
        **kwargs,
    ):
        backbone_output: BaseModelOutput = self.backbone(
            input_ids=input_ids, attention_mask=attention_mask, **kwargs
        )

        mlm_output = self.mlm_head(backbone_output, attention_mask, labels, **kwargs)

        return mlm_output

    def get_cls_output(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        head_name: str,
        labels: Optional[torch.Tensor] = None,
        **kwargs,
    ):
        backbone_output: BaseModelOutput = self.backbone(
            input_ids=input_ids, attention_mask=attention_mask, **kwargs
        )

        if head_name not in self.cls_heads:
            raise ValueError(f"Head {head_name} not found in model")

        cls_output = self.cls_heads[head_name](
            backbone_output, attention_mask, labels, **kwargs
        )

        return cls_output
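As a usage sketch (not part of the upload), the three forward routes above can also be called directly. This assumes `model` is an `ILKTModel` loaded as in the README example and that the mBERT backbone tokenizer is appropriate; head names and class counts come from `config.json`:

```python
import torch
from transformers import AutoTokenizer

# `model` is assumed to be an ILKTModel instance (see the README loading example).
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-multilingual-cased")
batch = tokenizer(["Ala ma kota.", "Kot ma Alę."], padding=True, return_tensors="pt")

model.eval()
with torch.no_grad():
    # Sentence embeddings: CLS pooling followed by the embedding head.
    emb = model.get_sentence_embedding(**batch).pooler_output          # (2, 768)

    # Masked-language-modelling logits over the backbone vocabulary.
    mlm_logits = model.get_mlm_output(**batch).logits                  # (2, seq_len, vocab)

    # Task-specific classification via one of the heads declared in cls_heads.
    cls_logits = model.get_cls_output(**batch, head_name="allegro--klej-psc").logits  # (2, 2)
```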
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f9bb1f3ac01d553ec242818040c7aa1448079a1ac36b06a5c5240b9572f56f4e
size 1093435820