Upload model
- README.md +199 -0
- config.json +42 -0
- configuration_sip_finetune.py +21 -0
- generation_config.json +7 -0
- model.safetensors +3 -0
- modeling_sip_finetune.py +102 -0
README.md
ADDED
@@ -0,0 +1,199 @@
---
library_name: transformers
tags: []
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
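The "How to Get Started" section above is still a placeholder. A minimal loading sketch, based only on the other files in this upload (the repo id below is hypothetical, and `trust_remote_code=True` is needed because the model and config classes live in this repo):

```python
from transformers import AutoModel, ByT5Tokenizer

# Hypothetical repo id; the upload does not state one.
model = AutoModel.from_pretrained("user/sip-finetuned", trust_remote_code=True)

# config.json sets tokenizer_class to ByT5Tokenizer, which is byte-level and needs no vocab file.
tokenizer = ByT5Tokenizer()

batch = tokenizer(["example input string"], return_tensors="pt")
# SIPFinetuningModel.generate prepends the tunable prefix before delegating to T5's generate.
output = model.generate(input_ids=batch["input_ids"],
                        attention_mask=batch["attention_mask"],
                        max_new_tokens=64)
print(tokenizer.batch_decode(output, skip_special_tokens=True))
```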
config.json
ADDED
@@ -0,0 +1,42 @@
{
  "_name_or_path": "/home/matthias/phd/artificial_tasks/meta_adapters/models/w_fsts_pretrain_s4_32_hf_ft",
  "architectures": [
    "SIPFinetuningModel"
  ],
  "auto_map": {
    "AutoModel": "modeling_sip_finetune.SIPFinetuningModel",
    "AutoConfig": "configuration_sip_finetune.SIPFinetuningModelConfig"
  },
  "classifier_dropout": 0.0,
  "d_ff": 3584,
  "d_kv": 64,
  "d_model": 1472,
  "decoder_start_token_id": 0,
  "dense_act_fn": "gelu_new",
  "dropout_rate": 0.1,
  "eos_token_id": 1,
  "feed_forward_proj": "gated-gelu",
  "gradient_checkpointing": false,
  "initializer_factor": 1.0,
  "is_encoder_decoder": true,
  "is_gated_act": true,
  "layer_norm_epsilon": 1e-06,
  "model_type": "sip_finetune",
  "num_decoder_layers": 4,
  "num_examples": 32,
  "num_heads": 6,
  "num_layers": 12,
  "num_precomputed_examples": 400,
  "pad_token_id": 0,
  "prefix_length": 50,
  "prefix_max_init_length": 70,
  "random_selection": true,
  "relative_attention_max_distance": 128,
  "relative_attention_num_buckets": 32,
  "tie_word_embeddings": false,
  "tokenizer_class": "ByT5Tokenizer",
  "torch_dtype": "float32",
  "transformers_version": "4.38.1",
  "use_cache": true,
  "vocab_size": 384
}
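Two observations about these numbers. The transformer dimensions (d_model 1472, d_ff 3584, 12 encoder and 4 decoder layers, 6 heads, vocabulary 384) match ByT5-small. And the precomputed prefix bank implied by the prefix fields (stored as the `prefix_init_tensor` buffer in modeling_sip_finetune.py) accounts for a noticeable share of the checkpoint; a quick back-of-the-envelope check:

```python
# fp32 size of the precomputed prefix bank implied by this config:
# num_precomputed_examples * prefix_max_init_length * d_model values.
floats = 400 * 70 * 1472        # 41,216,000 values
print(floats * 4 / 1e6, "MB")   # ~164.9 MB of the ~1.36 GB model.safetensors file below
```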
configuration_sip_finetune.py
ADDED
@@ -0,0 +1,21 @@
from transformers import T5Config


class SIPFinetuningModelConfig(T5Config):
    model_type = "sip_finetune"

    def __init__(self,
                 num_examples: int = 32,
                 prefix_length: int = 50,
                 random_selection: bool = True,
                 # Don't change these unless you change what the prefix of the model is initialized with:
                 prefix_max_init_length: int = 70,
                 num_precomputed_examples: int = 400,
                 **kwargs):
        # These parameters all concern the initialization of the prefix.
        self.num_examples = num_examples
        self.prefix_length = prefix_length
        self.random_selection = random_selection
        self.prefix_max_init_length = prefix_max_init_length
        self.num_precomputed_examples = num_precomputed_examples
        super().__init__(**kwargs)
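For illustration, a sketch of instantiating the config directly (assuming the file is importable from the working directory; the prefix-related defaults are the same values serialized in config.json above, and any T5Config keyword passes through **kwargs):

```python
from configuration_sip_finetune import SIPFinetuningModelConfig

# Defaults match config.json: 32 precomputed prefixes averaged into a tunable prefix of length 50.
cfg = SIPFinetuningModelConfig(num_examples=32, prefix_length=50, random_selection=True)
print(cfg.model_type)                                             # sip_finetune
print(cfg.num_precomputed_examples, cfg.prefix_max_init_length)   # 400 70
```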
generation_config.json
ADDED
@@ -0,0 +1,7 @@
{
  "_from_model_config": true,
  "decoder_start_token_id": 0,
  "eos_token_id": 1,
  "pad_token_id": 0,
  "transformers_version": "4.38.1"
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7450e9144e3d83b9b6f67ce2701ca8d544664d37ba89a80c4f8f9b3139f16480
size 1363731112
modeling_sip_finetune.py
ADDED
@@ -0,0 +1,102 @@
import os
from typing import Callable, Optional, Union

import torch
from transformers import PreTrainedModel, T5ForConditionalGeneration

from .configuration_sip_finetune import SIPFinetuningModelConfig


class SIPFinetuningModel(PreTrainedModel):
    config_class = SIPFinetuningModelConfig

    def __init__(self, config: SIPFinetuningModelConfig):
        super().__init__(config)

        self.model = T5ForConditionalGeneration(config)

        # Buffer holding the precomputed prefixes (one per pre-training FST) from which the
        # tunable prefix is initialized; its contents are loaded from the checkpoint.
        self.register_buffer("prefix_init_tensor",
                             torch.zeros(config.num_precomputed_examples, config.prefix_max_init_length, config.d_model))

        # There are two cases: (1) we initialize the model right after SIP-pretraining, i.e. the
        # tunable prefix is not set yet, and (2) the model has been fine-tuned on downstream data,
        # and hence there is meaningful data in the tunable prefix.
        #
        # Initialize the prefix with NaNs. If we load a SIP-pretrained checkpoint, the prefix is
        # still NaN after loading and the custom from_pretrained below initializes it from the
        # precomputed tensor; if we load a fine-tuned checkpoint, the NaNs are simply overwritten
        # by the stored prefix.
        self.prefix_embedding = torch.nn.Parameter(torch.nan + torch.zeros((1, self.config.prefix_length, self.config.d_model)))
        self.prefix_has_been_initialized = False

    def _initialize_prefix(self):
        prefix_init_tensor = self.prefix_init_tensor
        if self.config.random_selection:
            # Randomize which precomputed FST prefixes are averaged to initialize the prefix.
            prefix_init_tensor = prefix_init_tensor[torch.randperm(prefix_init_tensor.shape[0]), :, :]

        prefix_init_tensor = prefix_init_tensor[:self.config.num_examples, :self.config.prefix_length, :]  # shape (num examples, prefix length, d_model)
        self.prefix_embedding.data.copy_(prefix_init_tensor.mean(dim=0, keepdim=True))
        self.prefix_has_been_initialized = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
        *model_args,
        **kwargs,
    ):
        model = super(SIPFinetuningModel, cls).from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        if torch.all(model.prefix_embedding.isnan()):
            model._initialize_prefix()
        return model

    def prepare_input(self, kwargs):
        """
        Prepends the tunable prefix to the given input.

        :param kwargs: keyword arguments for the underlying T5 model, including "input_ids".
        :return: the same kwargs with "input_ids" replaced by "inputs_embeds" (prefix prepended)
                 and the attention mask extended accordingly.
        """
        input_ids = kwargs["input_ids"]

        embedded_inputs = self.model.get_input_embeddings()(input_ids)

        batch_size = input_ids.shape[0]

        prefix = torch.repeat_interleave(self.prefix_embedding, batch_size, 0)  # shape (batch, prefix length, embed dim)

        kwargs = dict(kwargs)

        embedded_inputs = torch.cat([prefix, embedded_inputs], dim=1)  # shape (batch, prefix + seq length, embed dim)

        del kwargs["input_ids"]
        kwargs["inputs_embeds"] = embedded_inputs

        if "attention_mask" in kwargs:
            # The prefix positions are always attended to.
            ones = torch.ones((batch_size, self.config.prefix_length), device=embedded_inputs.device, dtype=kwargs["attention_mask"].dtype)
            kwargs["attention_mask"] = torch.cat([ones, kwargs["attention_mask"]], dim=1)

        return kwargs

    def forward(self, **kwargs):
        return self.model(**self.prepare_input(kwargs))

    def generate(self, **kwargs):
        return self.model.generate(**self.prepare_input(kwargs))

    def get_optimizer(self, optimizer: Callable[..., torch.optim.Optimizer], prefix_lr: float = 1.0, **kwargs) -> torch.optim.Optimizer:
        """
        Return an optimizer that uses a different (typically higher) learning rate for the prefix
        than for the rest of the model.
        """
        prefix_params = []
        other_params = []
        for name, param in self.named_parameters():
            if name == "prefix_embedding":
                prefix_params.append(param)
            else:
                other_params.append(param)
        return optimizer(params=[{"params": prefix_params, "lr": prefix_lr}, {"params": other_params}], **kwargs)
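`get_optimizer` builds two parameter groups so the prefix can move faster than the backbone during fine-tuning. A hypothetical usage sketch (`model` is a loaded SIPFinetuningModel; the backbone learning rate below is illustrative, not from the upload):

```python
import torch

# Prefix gets its own learning rate (the code's default of 1.0 here);
# everything else gets the lr passed through **kwargs.
optimizer = model.get_optimizer(torch.optim.AdamW, prefix_lr=1.0, lr=1e-4)
```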