furquan committed on
Commit
9625083
1 Parent(s): 8830399

OPT 1.3b Prompt Tuned

Files changed (4)
  1. config.json +35 -0
  2. generation_config.json +7 -0
  3. prompt_tuned_opt.py +239 -0
  4. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "facebook/opt-1.3b",
+   "_remove_final_layer_norm": false,
+   "activation_dropout": 0.0,
+   "activation_function": "relu",
+   "architectures": [
+     "OPT_PromptTuned_For_SentimentAnalysis"
+   ],
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoModel": "prompt_tuned_opt.OPT_PromptTuned_For_SentimentAnalysis"
+   },
+   "bos_token_id": 2,
+   "do_layer_norm_before": true,
+   "dropout": 0.1,
+   "enable_bias": true,
+   "eos_token_id": 1,
+   "ffn_dim": 8192,
+   "hidden_size": 2048,
+   "init_std": 0.02,
+   "layer_norm_elementwise_affine": true,
+   "layerdrop": 0.0,
+   "max_position_embeddings": 2048,
+   "model_type": "opt",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 24,
+   "pad_token_id": 1,
+   "prefix": "</s>",
+   "prompt_dict_path": "./prompt-embedding-1.3b",
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.1",
+   "use_cache": true,
+   "vocab_size": 50272,
+   "word_embed_proj_dim": 2048
+ }
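
The `auto_map` entry above points `AutoModel` at the custom class shipped in `prompt_tuned_opt.py`, so the checkpoint is meant to be loaded through the Auto API with `trust_remote_code=True`. A minimal sketch, with a placeholder repository id (the actual Hub id is not stated in this commit):

```python
from transformers import AutoModel

repo_id = "<this-repo-id>"  # placeholder: substitute this repository's Hub id or a local clone path

# trust_remote_code=True lets AutoModel resolve the auto_map entry
# "prompt_tuned_opt.OPT_PromptTuned_For_SentimentAnalysis" from this repository.
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
```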
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": 1,
+   "pad_token_id": 1,
+   "transformers_version": "4.33.1"
+ }
prompt_tuned_opt.py ADDED
@@ -0,0 +1,239 @@
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, PreTrainedModel
+ import torch
+ import torch.nn as nn
+ from typing import List, Optional, Tuple, Union
+
+ from transformers.utils import (
+     add_code_sample_docstrings,
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     logging,
+     replace_return_docstrings,
+ )
+
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPast,
+     CausalLMOutputWithPast,
+     QuestionAnsweringModelOutput,
+     SequenceClassifierOutputWithPast,
+ )
+
+ from transformers import OPTConfig
+ from transformers.models.opt.modeling_opt import OPTModel
+ from transformers.models.opt.modeling_opt import OPTPreTrainedModel
+
+ class OPT_PromptTuned_For_SentimentAnalysis(OPTPreTrainedModel):
+     _tied_weights_keys = ["lm_head.weight"]
+     _CONFIG_FOR_DOC = "OPTConfig"
+     config_class = OPTConfig
+
+
+     def __init__(self, config):
+
+         self.config = config
+         super().__init__(config)
+         self.model = OPTModel(config)
+         self.lm_head = nn.Linear(config.word_embed_proj_dim, config.vocab_size, bias=False)
+         # 8 trainable soft-prompt vectors that get prepended to the input embeddings
+         self.embedding = nn.Embedding(8, config.word_embed_proj_dim)
+         self.post_init()
+
+
+     def get_input_embeddings(self):
+         return self.model.decoder.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.decoder.embed_tokens = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model.decoder = decoder
+
+     def get_decoder(self):
+         return self.model.decoder
+
+     def load_prompts(self):
+         # Load the trained soft-prompt embeddings from the path stored in the config
+         self.embedding.load_state_dict(torch.load(self.config.prompt_dict_path))
+         return self
+
+     @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         r"""
+         Args:
+             input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+                 Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+                 provide it.
+
+                 Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+                 [`PreTrainedTokenizer.__call__`] for details.
+
+                 [What are input IDs?](../glossary#input-ids)
+             attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                 Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+                 - 1 for tokens that are **not masked**,
+                 - 0 for tokens that are **masked**.
+
+                 [What are attention masks?](../glossary#attention-mask)
+             head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
+                 Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+                 - 1 indicates the head is **not masked**,
+                 - 0 indicates the head is **masked**.
+
+             past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+                 Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+                 shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+                 shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
+                 tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
+
+                 Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+                 cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+                 If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+                 that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+                 all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+             inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+                 Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+                 This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+                 than the model's internal embedding lookup matrix.
+             labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                 Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+                 config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                 (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+             use_cache (`bool`, *optional*):
+                 If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+                 (see `past_key_values`).
+             output_attentions (`bool`, *optional*):
+                 Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                 returned tensors for more detail.
+             output_hidden_states (`bool`, *optional*):
+                 Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                 for more detail.
+             return_dict (`bool`, *optional*):
+                 Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+         Returns:
+
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, OPTForCausalLM
+
+         >>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m")
+         >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo."
+         ```"""
+
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+
+
+         outputs = self.model.decoder(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             head_mask=head_mask,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=True,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict
+         )
+
+         logits = self.lm_head(outputs[0]).contiguous()
+
+         loss = None
+         if labels is not None:
+             # move labels to correct device to enable model parallelism
+             labels = labels.to(logits.device)
+             # Shift so that tokens < n predict n
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             # Flatten the tokens
+             loss_fct = nn.CrossEntropyLoss()
+             loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1))
+
+         if not return_dict:
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+     def prepare_inputs_for_generation(
+         self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+     ):
+
+         if past_key_values:
+             input_ids = input_ids[:, -1:]
+
+         if inputs_embeds is not None and past_key_values is None:
+             # First decoding step: prepend the 8 soft-prompt embeddings and extend the attention mask to cover them
+             input = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]).to(inputs_embeds.device)
+             inputs_embeds = torch.cat([self.embedding(input).unsqueeze(0), inputs_embeds], dim=1)
+             attention_mask = torch.cat(
+                 [torch.ones((attention_mask.shape[0], 8),
+                             device=attention_mask.device),
+                  attention_mask], dim=1)
+             model_inputs = {"inputs_embeds": inputs_embeds}
+         else:
+             model_inputs = {"input_ids": input_ids}
+             # Later steps: keep accounting for the 8 prompt positions in the attention mask
+             attention_mask = torch.cat(
+                 [torch.ones((attention_mask.shape[0], 8),
+                             device=attention_mask.device),
+                  attention_mask], dim=1)
+
+         model_inputs.update(
+             {
+                 "past_key_values": past_key_values,
+                 "use_cache": kwargs.get("use_cache"),
+                 "attention_mask": attention_mask,
+             }
+         )
+         return model_inputs
+
+     def generate(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs):
+         # Cap generation at 3 new tokens
+         max_new_tokens = 3
+         input_embeddings = self.get_input_embeddings()(input_ids).to(input_ids.device)
+         # Call the original generate method
+         return super().generate(input_ids=input_ids, inputs_embeds=input_embeddings, max_new_tokens=max_new_tokens, attention_mask=attention_mask, **kwargs)
+
+     @staticmethod
+     def _reorder_cache(past_key_values, beam_idx):
+         reordered_past = ()
+         for layer_past in past_key_values:
+             reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
+         return reordered_past
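
Taken together, the class wraps an OPT decoder with an 8-vector soft prompt: `prepare_inputs_for_generation` prepends the learned prompt embeddings and 8 extra attention-mask positions, and the overridden `generate` caps output at 3 new tokens. A usage sketch follows, with some assumptions: the repository id is a placeholder, the tokenizer is taken from the base `facebook/opt-1.3b` checkpoint (no tokenizer files ship in this commit), and `load_prompts()` expects the file at `config.prompt_dict_path` (`./prompt-embedding-1.3b`) to be available locally, which is also not part of this commit.

```python
import torch
from transformers import AutoModel, AutoTokenizer

repo_id = "<this-repo-id>"  # placeholder: substitute this repository's Hub id or a local clone path

# Assumption: this commit ships no tokenizer files, so reuse the base OPT tokenizer.
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")

model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
model = model.load_prompts()  # reads the soft-prompt weights from config.prompt_dict_path
model.eval()

inputs = tokenizer("I really enjoyed this movie!", return_tensors="pt")
with torch.no_grad():
    # The custom generate() prepends the 8 prompt embeddings and emits at most 3 new tokens.
    output_ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])
```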
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79791b34225a0c8644e7c4d71a287008ce2348c8759c1d4842b7681d2fe9e5e7
+ size 5263230661
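
Since the weights file is stored as a Git LFS pointer, one way to check that a local download matches this commit is to compare its SHA-256 against the oid recorded above. A minimal sketch, assuming `pytorch_model.bin` sits in the current directory:

```python
import hashlib

# SHA-256 recorded in the LFS pointer above.
expected = "79791b34225a0c8644e7c4d71a287008ce2348c8759c1d4842b7681d2fe9e5e7"

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    # Hash in 1 MiB chunks to avoid loading the ~5.3 GB file into memory at once.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected, "downloaded weights do not match this commit"
```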