arnocandel committed on
Commit
2951866
1 Parent(s): 836bab6

Upload stopping.py

Browse files
Files changed (1) hide show
  1. stopping.py +70 -0
stopping.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from transformers import StoppingCriteria, StoppingCriteriaList
3
+
4
+
5
class StoppingCriteriaSub(StoppingCriteria):
    """Stop generation once stop token sequences have been seen often enough.

    Each stop sequence ``stops[i]`` is paired cyclically with a threshold
    ``encounters[i % len(encounters)]``: generation halts as soon as any
    stop sequence's suffix-match count reaches its threshold.
    """

    def __init__(self, stops=None, encounters=None, device="cuda"):
        """
        :param stops: list of 1-D LongTensors of token ids to match against
            the tail of the generated sequence (moved to ``device``).
        :param encounters: per-stop match-count thresholds; len(stops) must be
            a multiple of len(encounters) (thresholds are reused cyclically).
        :param device: device the stop tensors are moved to.
        :raises ValueError: if stops is non-empty and len(stops) is not a
            multiple of len(encounters).
        """
        super().__init__()
        # Avoid mutable default arguments (shared state across instances).
        stops = [] if stops is None else stops
        encounters = [] if encounters is None else encounters
        # Raise instead of assert (asserts vanish under -O); also guard the
        # ZeroDivisionError the original hit when encounters was empty.
        if stops:
            if not encounters or len(stops) % len(encounters) != 0:
                raise ValueError("Number of stops and encounters must match")
        self.encounters = encounters
        self.stops = [stop.to(device) for stop in stops]
        # Running count of how many times each stop sequence has matched.
        self.num_stops = [0] * len(stops)

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        """Return True when any stop sequence has been seen enough times.

        Compares each stop sequence against the trailing tokens of the first
        batch element only (``input_ids[0]``).
        """
        generated = input_ids[0]
        for stopi, stop in enumerate(self.stops):
            n = len(stop)
            # Skip stops longer than the sequence so far; the original slice
            # comparison would raise a shape-mismatch error in that case.
            if n == 0 or generated.shape[0] < n:
                continue
            if torch.all(stop == generated[-n:]).item():
                self.num_stops[stopi] += 1
                if self.num_stops[stopi] >= self.encounters[stopi % len(self.encounters)]:
                    return True
        return False
24
+
25
+
26
def get_stopping(prompt_type, tokenizer, device, human='<human>:', bot="<bot>:"):
    """Build a StoppingCriteriaList of stop-word criteria for a prompt style.

    :param prompt_type: one of 'human_bot', 'instruct_vicuna',
        'instruct_with_end'; any other value yields an empty criteria list.
    :param tokenizer: tokenizer used to encode the stop words; its
        pad_token / pad_token_id are consulted to strip leading padding.
    :param device: device the stop token tensors are placed on.
    :param human: human-turn marker used for 'human_bot' prompts.
    :param bot: bot-turn marker used for 'human_bot' prompts.
    :return: a transformers StoppingCriteriaList (possibly empty).
    """
    if prompt_type in ['human_bot', 'instruct_vicuna', 'instruct_with_end']:
        if prompt_type == 'human_bot':
            # Stopping only starts once output is beyond the prompt.
            # 1 human is enough to trigger, but need 2 bots, because the very
            # first view back will be the bot marker we added.
            stop_words = [human, bot, '\n' + human, '\n' + bot]
            encounters = [1, 2]
        elif prompt_type == 'instruct_vicuna':
            # Even the list below is not exhaustive: these are generic strings
            # and there are many ways the tokenizer can encode them.
            stop_words = [
                '### Human:',
                """
### Human:""",
                """
### Human:
""",
                '### Assistant:',
                """
### Assistant:""",
                """
### Assistant:
""",
            ]
            encounters = [1, 2]
        else:
            # Some instruct prompts end with this; stopping on it is harmless
            # since it is uncommon otherwise.
            stop_words = ['### End']
            encounters = [1]
        stop_words_ids = [
            tokenizer(stop_word, return_tensors='pt')['input_ids'].squeeze()
            for stop_word in stop_words]
        # Handle the single-token case: squeeze() turns it into a 0-d tensor.
        stop_words_ids = [x if len(x.shape) > 0 else torch.tensor([x]) for x in stop_words_ids]
        # Avoid padding prepended in front of the tokens.
        if tokenizer.pad_token:
            stop_words_ids = [x[1:] if x[0] == tokenizer.pad_token_id and len(x) > 1 else x
                              for x in stop_words_ids]
        # Handle the fake leading '\n' token some tokenizers add.
        # NOTE: this must run BEFORE dropping empty sequences — the original
        # filtered first, so once anything was removed the zip with stop_words
        # misaligned and the wrong entries were truncated.
        stop_words_ids = [x[1:] if word[0] == '\n' else x
                          for x, word in zip(stop_words_ids, stop_words)]
        # Drop any stop words that tokenized (or were stripped) to nothing.
        stop_words_ids = [x for x in stop_words_ids if x.shape[0] > 0]
        # Build the stopper.
        stopping_criteria = StoppingCriteriaList(
            [StoppingCriteriaSub(stops=stop_words_ids, encounters=encounters, device=device)])
    else:
        stopping_criteria = StoppingCriteriaList()
    return stopping_criteria