nazneen committed on
Commit
c1bd56c
•
1 Parent(s): 9a316f2

Delete instruct_pipeline.py

Files changed (1)
  1. instruct_pipeline.py +0 -164
instruct_pipeline.py DELETED
@@ -1,164 +0,0 @@
- import logging
- import re
-
- import numpy as np
- from transformers import Pipeline, PreTrainedTokenizer
-
- logger = logging.getLogger(__name__)
-
- INSTRUCTION_KEY = "### Instruction:"
- RESPONSE_KEY = "### Response:"
- END_KEY = "### End"
- INTRO_BLURB = (
-     "Below is an instruction that describes a task. Write a response that appropriately completes the request."
- )
-
- # This is the prompt that is used for generating responses using an already trained model. It ends with the response
- # key, where the job of the model is to provide the completion that follows it (i.e. the response itself).
- PROMPT_FOR_GENERATION_FORMAT = """{intro}
-
- {instruction_key}
- {instruction}
-
- {response_key}
- """.format(
-     intro=INTRO_BLURB,
-     instruction_key=INSTRUCTION_KEY,
-     instruction="{instruction}",
-     response_key=RESPONSE_KEY,
- )
-
-
- def get_special_token_id(tokenizer: PreTrainedTokenizer, key: str) -> int:
-     """Gets the token ID for a given string that has been added to the tokenizer as a special token.
-
-     When training, we configure the tokenizer so that sequences like "### Instruction:" and "### End" are
-     treated specially and converted to a single, new token. This retrieves the token ID each of these keys map to.
-
-     Args:
-         tokenizer (PreTrainedTokenizer): the tokenizer
-         key (str): the key to convert to a single token
-
-     Raises:
-         ValueError: if more than one ID was generated
-
-     Returns:
-         int: the token ID for the given key
-     """
-     token_ids = tokenizer.encode(key)
-     if len(token_ids) > 1:
-         raise ValueError(f"Expected only a single token for '{key}' but found {token_ids}")
-     return token_ids[0]
-
-
- class InstructionTextGenerationPipeline(Pipeline):
-     def __init__(
-         self, *args, do_sample: bool = True, max_new_tokens: int = 256, top_p: float = 0.92, top_k: int = 0, **kwargs
-     ):
-         super().__init__(*args, do_sample=do_sample, max_new_tokens=max_new_tokens, top_p=top_p, top_k=top_k, **kwargs)
-
-     def _sanitize_parameters(self, return_instruction_text=False, **generate_kwargs):
-         preprocess_params = {}
-
-         # Newer versions of the tokenizer configure the response key as a special token; still newer versions may
-         # append a newline to yield a single token. Find whatever token is configured for the response key.
-         tokenizer_response_key = next(
-             (token for token in self.tokenizer.additional_special_tokens if token.startswith(RESPONSE_KEY)), None
-         )
-
-         response_key_token_id = None
-         end_key_token_id = None
-         if tokenizer_response_key:
-             try:
-                 response_key_token_id = get_special_token_id(self.tokenizer, tokenizer_response_key)
-                 end_key_token_id = get_special_token_id(self.tokenizer, END_KEY)
-
-                 # Ensure generation stops once it generates "### End"
-                 generate_kwargs["eos_token_id"] = end_key_token_id
-             except ValueError:
-                 pass
-
-         forward_params = generate_kwargs
-         postprocess_params = {
-             "response_key_token_id": response_key_token_id,
-             "end_key_token_id": end_key_token_id,
-             "return_instruction_text": return_instruction_text,
-         }
-
-         return preprocess_params, forward_params, postprocess_params
-
-     def preprocess(self, instruction_text, **generate_kwargs):
-         prompt_text = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction_text)
-         inputs = self.tokenizer(
-             prompt_text,
-             return_tensors="pt",
-         )
-         inputs["prompt_text"] = prompt_text
-         inputs["instruction_text"] = instruction_text
-         return inputs
-
-     def _forward(self, model_inputs, **generate_kwargs):
-         input_ids = model_inputs["input_ids"]
-         attention_mask = model_inputs.get("attention_mask", None)
-         generated_sequence = self.model.generate(
-             input_ids=input_ids.to(self.model.device),
-             attention_mask=attention_mask.to(self.model.device) if attention_mask is not None else None,
-             pad_token_id=self.tokenizer.pad_token_id,
-             **generate_kwargs,
-         )[0].cpu()
-         instruction_text = model_inputs.pop("instruction_text")
-         return {"generated_sequence": generated_sequence, "input_ids": input_ids, "instruction_text": instruction_text}
-
-     def postprocess(self, model_outputs, response_key_token_id, end_key_token_id, return_instruction_text):
-         sequence = model_outputs["generated_sequence"]
-         instruction_text = model_outputs["instruction_text"]
-
-         # The response will be set to this variable if we can identify it.
-         decoded = None
-
-         # If we have token IDs for the response and end, then we can find the tokens and only decode between them.
-         if response_key_token_id and end_key_token_id:
-             # Find where "### Response:" first appears in the generated tokens. Since it is part of the
-             # prompt, we should definitely find it. We will return the tokens found after this token.
-             response_pos = None
-             response_positions = np.where(sequence == response_key_token_id)[0]
-             if len(response_positions) == 0:
-                 logger.warning(f"Could not find response key {response_key_token_id} in: {sequence}")
-             else:
-                 response_pos = response_positions[0]
-
-             if response_pos is not None:
-                 # Next find where "### End" is located. The model has been trained to end its responses with this
-                 # sequence (or rather, the token ID it maps to, since it is a special token). We may not find
-                 # this token, as the response could be truncated. If we don't find it then just return everything
-                 # to the end. Note that even though we set eos_token_id, we still see this token at the end.
-                 end_pos = None
-                 end_positions = np.where(sequence == end_key_token_id)[0]
-                 if len(end_positions) > 0:
-                     end_pos = end_positions[0]
-
-                 decoded = self.tokenizer.decode(sequence[response_pos + 1 : end_pos]).strip()
-         else:
-             # Otherwise we'll decode everything and use a regex to find the response and end.
-
-             fully_decoded = self.tokenizer.decode(sequence)
-
-             # The response appears after "### Response:". The model has been trained to append "### End" at the
-             # end.
-             m = re.search(r"#+\s*Response:\s*(.+?)#+\s*End", fully_decoded, flags=re.DOTALL)
-
-             if m:
-                 decoded = m.group(1).strip()
-             else:
-                 # The model might not generate the "### End" sequence before reaching the max tokens. In this case,
-                 # return everything after "### Response:".
-                 m = re.search(r"#+\s*Response:\s*(.+)", fully_decoded, flags=re.DOTALL)
-                 if m:
-                     decoded = m.group(1).strip()
-                 else:
-                     logger.warning(f"Failed to find response in:\n{fully_decoded}")
-
-         if return_instruction_text:
-             return {"instruction_text": instruction_text, "generated_text": decoded}
-
-         return decoded
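For reference, a minimal usage sketch of the deleted pipeline. The checkpoint name is an assumption: it should be any Dolly-style model whose tokenizer registers "### Response:" and "### End" as special tokens, as this file expects.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# "databricks/dolly-v2-3b" is illustrative; any checkpoint trained with this
# prompt format and these special tokens should behave the same way.
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-3b", padding_side="left")
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-3b", torch_dtype=torch.bfloat16)

generate_text = InstructionTextGenerationPipeline(model=model, tokenizer=tokenizer)

# preprocess() wraps the instruction in the INTRO_BLURB / "### Instruction:" /
# "### Response:" template; postprocess() returns only the text between
# "### Response:" and "### End".
print(generate_text("Explain the difference between nuclear fission and fusion."))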