BartekSadlej committed
Commit 6eaa398
Parent(s): 06913f6

Upload model

Files changed:
- README.md +199 -0
- config.json +52 -0
- config.py +31 -0
- model.py +265 -0
- model.safetensors +3 -0
README.md
ADDED
@@ -0,0 +1,199 @@
---
library_name: transformers
tags: []
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

This is the model card of a 🤗 transformers model that has been pushed to the Hub. This model card has been automatically generated.

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]
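
A minimal sketch, not part of the uploaded card: the repo id below is a placeholder for wherever this checkpoint lives on the Hub, `trust_remote_code=True` is needed because `config.json` maps `AutoModel` to the custom `model.ILKTModel`, and the tokenizer is assumed to be that of the `microsoft/mdeberta-v3-base` backbone.

```python
import torch
from transformers import AutoModel, AutoTokenizer

repo_id = "your-namespace/your-ilkt-model"  # placeholder, not the real repo id

tokenizer = AutoTokenizer.from_pretrained("microsoft/mdeberta-v3-base")  # assumed backbone tokenizer
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
model.eval()

batch = tokenizer(["Przykładowe zdanie."], return_tensors="pt")
with torch.no_grad():
    # The default forward routing returns a BaseModelOutputWithPooling whose
    # pooler_output holds the CLS-pooled sentence embedding.
    out = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
print(out.pooler_output.shape)  # torch.Size([1, 768])
```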
## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
config.json
ADDED
@@ -0,0 +1,52 @@
{
  "architectures": [
    "ILKTModel"
  ],
  "auto_map": {
    "AutoConfig": "config.ILKTConfig",
    "AutoModel": "model.ILKTModel"
  },
  "backbone_config": {
    "pretrained_model_name_or_path": "microsoft/mdeberta-v3-base",
    "trust_remote_code": true
  },
  "cls_head_config": {
    "dropout": 0.0,
    "n_dense": 0,
    "pool_type": "cls",
    "use_batch_norm": true,
    "use_layer_norm": false
  },
  "cls_heads": [
    [
      3,
      "allegro--klej-cdsc-e"
    ],
    [
      2,
      "allegro--klej-psc"
    ],
    [
      2,
      "allegro--klej-dyk"
    ]
  ],
  "embedding_head_config": {
    "dropout": 0.0,
    "n_dense": 1,
    "normalize_embeddings": false,
    "pool_type": "cls",
    "use_batch_norm": false,
    "use_layer_norm": false
  },
  "hidden_size": 768,
  "mlm_head_config": {
    "dropout": 0.0,
    "n_dense": 0,
    "use_batch_norm": true,
    "use_layer_norm": false
  },
  "model_type": "ILKT",
  "torch_dtype": "float32",
  "transformers_version": "4.41.2"
}
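
Each entry in `cls_heads` is an `[n_classes, name]` pair: the model builds one classification head per pair, keyed by name (here the three KLEJ tasks). A small sketch of reading these pairs from a local copy of this file; the path is an assumption:

```python
import json

# Assumes config.json has been downloaded next to this script.
with open("config.json") as f:
    cfg = json.load(f)

for n_classes, name in cfg["cls_heads"]:
    print(f"head {name!r}: {n_classes} classes")
# head 'allegro--klej-cdsc-e': 3 classes
# head 'allegro--klej-psc': 2 classes
# head 'allegro--klej-dyk': 2 classes
```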
config.py
ADDED
@@ -0,0 +1,31 @@
from typing import Any, Dict, List, Optional, Tuple

from transformers import PretrainedConfig


class ILKTConfig(PretrainedConfig):

    model_type = "ILKT"

    def __init__(
        self,
        backbone_config: Optional[Dict[str, Any]] = None,
        embedding_head_config: Optional[Dict[str, Any]] = None,
        mlm_head_config: Optional[Dict[str, Any]] = None,
        cls_head_config: Optional[Dict[str, Any]] = None,
        cls_heads: Optional[List[Tuple[int, str]]] = None,
        max_length: int = 512,
        **kwargs,
    ):
        # None defaults instead of mutable default arguments ({} and []),
        # so defaults are not shared across instances.
        self.backbone_config = backbone_config if backbone_config is not None else {}
        self.embedding_head_config = (
            embedding_head_config if embedding_head_config is not None else {}
        )
        self.mlm_head_config = mlm_head_config if mlm_head_config is not None else {}
        self.cls_head_config = cls_head_config if cls_head_config is not None else {}
        self.cls_heads = cls_heads if cls_heads is not None else []
        self.max_length = max_length  # was `False`, which discarded the argument
        self.output_hidden_states = False

        # TODO: make this a proper HF config (save max_length etc.);
        # unclear how exactly this works in the HF ecosystem.

        super().__init__(**kwargs)
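
For illustration, a sketch of constructing this config directly; it assumes config.py sits in the working directory, and the values mirror the uploaded config.json:

```python
from config import ILKTConfig

cfg = ILKTConfig(
    backbone_config={
        "pretrained_model_name_or_path": "microsoft/mdeberta-v3-base",
        "trust_remote_code": True,
    },
    cls_heads=[(3, "allegro--klej-cdsc-e"), (2, "allegro--klej-psc")],
    max_length=512,
)
print(cfg.max_length)  # 512
```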
model.py
ADDED
@@ -0,0 +1,265 @@
from enum import Enum
from typing import Any, Dict, Optional

import torch
import torch.nn as nn
from transformers import AutoConfig, AutoModel, PreTrainedModel
from transformers.modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    MaskedLMOutput,
    SequenceClassifierOutput,
)

from .config import ILKTConfig


def cls_pooling(last_hidden_state, attention_mask):
    # Pool by taking the first ([CLS]) token; attention_mask is unused but
    # kept so every pooling function shares the same signature.
    return last_hidden_state[:, 0, :]


def create_head_blocks(
    hidden_size: int,
    n_dense: int,
    use_batch_norm: bool,
    use_layer_norm: bool,
    dropout: float,
    **kwargs,
) -> nn.Module:
    # n_dense blocks of Linear -> (BatchNorm1d | LayerNorm) -> ReLU -> Dropout.
    blocks = []
    for _ in range(n_dense):
        blocks.append(nn.Linear(hidden_size, hidden_size))
        if use_batch_norm:
            blocks.append(nn.BatchNorm1d(hidden_size))
        elif use_layer_norm:
            blocks.append(nn.LayerNorm(hidden_size))
        blocks.append(nn.ReLU())
        if dropout > 0:
            blocks.append(nn.Dropout(dropout))
    return nn.Sequential(*blocks)


class SentenceEmbeddingHead(nn.Module):
    def __init__(
        self, backbone_hidden_size: int, embedding_head_config: Dict[str, Any]
    ):
        super().__init__()
        self.config = embedding_head_config

        # NOTE: this head is built (and saved in the checkpoint) but forward()
        # below returns the pooled embedding without passing it through.
        self.head = nn.Sequential(
            *[
                create_head_blocks(backbone_hidden_size, **embedding_head_config),
            ]
        )

    def forward(
        self, backbone_output: BaseModelOutput, attention_mask: torch.Tensor, **kwargs
    ) -> BaseModelOutputWithPooling:
        if self.config["pool_type"] == "cls":
            embeddings = cls_pooling(backbone_output.last_hidden_state, attention_mask)
        else:
            raise NotImplementedError(
                f"Pooling type {self.config['pool_type']} not implemented"
            )
        if self.config["normalize_embeddings"]:
            embeddings = nn.functional.normalize(embeddings, p=2, dim=-1)
        return BaseModelOutputWithPooling(
            last_hidden_state=backbone_output.last_hidden_state,
            pooler_output=embeddings,  # type: ignore
        )


class MLMHead(nn.Module):
    def __init__(
        self,
        backbone_hidden_size: int,
        vocab_size: int,
        mlm_head_config: Dict[str, Any],
    ):
        super().__init__()
        self.config = mlm_head_config

        self.head = nn.Sequential(
            *[
                create_head_blocks(backbone_hidden_size, **mlm_head_config),
                nn.Linear(backbone_hidden_size, vocab_size),
            ]
        )

    def forward(
        self,
        backbone_output: BaseModelOutput,
        attention_mask: torch.Tensor,
        labels: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> MaskedLMOutput:
        prediction_scores = self.head(backbone_output.last_hidden_state)

        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                prediction_scores.view(-1, prediction_scores.size(-1)),
                labels.view(-1),
            )
        return MaskedLMOutput(loss=loss, logits=prediction_scores)


class CLSHead(nn.Module):
    def __init__(
        self,
        backbone_hidden_size: int,
        n_classes: int,
        cls_head_config: Dict[str, Any],
    ):
        super().__init__()
        self.config = cls_head_config

        self.head = nn.Sequential(
            *[
                create_head_blocks(backbone_hidden_size, **cls_head_config),
                nn.Linear(backbone_hidden_size, n_classes),
            ]
        )

    def forward(
        self,
        backbone_output: BaseModelOutput,
        attention_mask: torch.Tensor,
        labels: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> SequenceClassifierOutput:
        if self.config["pool_type"] == "cls":
            embeddings = cls_pooling(backbone_output.last_hidden_state, attention_mask)
        else:
            raise NotImplementedError(
                f"Pooling type {self.config['pool_type']} not implemented"
            )

        prediction_scores = self.head(embeddings)

        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                prediction_scores.view(-1, prediction_scores.size(-1)),
                labels.view(-1),
            )
        return SequenceClassifierOutput(loss=loss, logits=prediction_scores)


class ForwardRouting(Enum):
    GET_SENTENCE_EMBEDDING = "get_sentence_embedding"
    GET_MLM_OUTPUT = "get_mlm_output"
    GET_CLS_OUTPUT = "get_cls_output"


class ILKTModel(PreTrainedModel):
    config_class = ILKTConfig

    def __init__(self, config: ILKTConfig):
        super().__init__(config)

        backbone_config = AutoConfig.from_pretrained(**config.backbone_config)
        pretrained_model_name_or_path = config.backbone_config[
            "pretrained_model_name_or_path"
        ]
        self.backbone = AutoModel.from_pretrained(
            pretrained_model_name_or_path, config=backbone_config
        )

        backbone_hidden_size = backbone_config.hidden_size
        self.config.hidden_size = backbone_hidden_size
        backbone_vocab_size = backbone_config.vocab_size
        self.embedding_head = SentenceEmbeddingHead(
            backbone_hidden_size, config.embedding_head_config
        )
        self.mlm_head = MLMHead(
            backbone_hidden_size, backbone_vocab_size, config.mlm_head_config
        )

        # One classification head per (n_classes, name) pair in config.cls_heads,
        # keyed by name.
        self.cls_heads = nn.ModuleDict(
            {
                name: CLSHead(backbone_hidden_size, n_classes, config.cls_head_config)
                for n_classes, name in config.cls_heads
            }
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        token_type_ids: Optional[torch.Tensor] = None,
        forward_routing: ForwardRouting = ForwardRouting.GET_SENTENCE_EMBEDDING,
        **kwargs,
    ):
        # Dispatch the single forward() entry point to the requested head.
        if forward_routing == ForwardRouting.GET_SENTENCE_EMBEDDING:
            return self.get_sentence_embedding(
                input_ids, attention_mask, token_type_ids=token_type_ids
            )
        elif forward_routing == ForwardRouting.GET_MLM_OUTPUT:
            return self.get_mlm_output(
                input_ids, attention_mask, token_type_ids=token_type_ids, **kwargs
            )
        elif forward_routing == ForwardRouting.GET_CLS_OUTPUT:
            return self.get_cls_output(
                input_ids, attention_mask, token_type_ids=token_type_ids, **kwargs
            )
        else:
            raise ValueError(f"Unknown forward routing {forward_routing}")

    def get_sentence_embedding(
        self, input_ids: torch.Tensor, attention_mask: torch.Tensor, **kwargs
    ):
        backbone_output: BaseModelOutput = self.backbone(
            input_ids=input_ids, attention_mask=attention_mask, **kwargs
        )

        embedding_output = self.embedding_head(
            backbone_output, attention_mask, **kwargs
        )

        return embedding_output

    def get_mlm_output(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        labels: Optional[torch.Tensor] = None,
        **kwargs,
    ):
        backbone_output: BaseModelOutput = self.backbone(
            input_ids=input_ids, attention_mask=attention_mask, **kwargs
        )

        mlm_output = self.mlm_head(backbone_output, attention_mask, labels, **kwargs)

        return mlm_output

    def get_cls_output(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        head_name: str,
        labels: Optional[torch.Tensor] = None,
        **kwargs,
    ):
        backbone_output: BaseModelOutput = self.backbone(
            input_ids=input_ids, attention_mask=attention_mask, **kwargs
        )

        if head_name not in self.cls_heads:
            raise ValueError(f"Head {head_name} not found in model")

        cls_output = self.cls_heads[head_name](
            backbone_output, attention_mask, labels, **kwargs
        )

        return cls_output
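
A hedged usage sketch of the routing API above: the repo id is a placeholder, the tokenizer is assumed to be the backbone's, and the head name reuses a value from config.json. Calling `get_cls_output` directly avoids importing the `ForwardRouting` enum from the remote-code module:

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/mdeberta-v3-base")  # assumed backbone tokenizer
model = AutoModel.from_pretrained("your-namespace/your-ilkt-model", trust_remote_code=True)  # placeholder repo id
model.eval()

batch = tokenizer(["Przykładowy tekst."], return_tensors="pt")
with torch.no_grad():
    # head_name must match one of the names registered in config.cls_heads.
    out = model.get_cls_output(
        input_ids=batch["input_ids"],
        attention_mask=batch["attention_mask"],
        head_name="allegro--klej-psc",
    )
print(out.logits.shape)  # torch.Size([1, 2])
```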
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a085aafaa0792023b095eee855d8c03467a0971431edb5b5225742534e345ca0
size 1887360524