shaojiang committed on
Commit
3c58126
1 Parent(s): aa81f3d

Delete generate.py

Files changed (1)
  1. generate.py +0 -187
generate.py DELETED
import torch
import torch.nn.functional as F
import os
from tqdm import trange
from transformers import GPT2LMHeadModel

def is_word(word):
    # True only if every character is a lowercase ASCII letter
    for item in list(word):
        if item not in 'qwertyuiopasdfghjklzxcvbnm':
            return False
    return True

def _is_chinese_char(char):
    """Checks whether `char` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as are Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
    cp = ord(char)
    if ((0x4E00 <= cp <= 0x9FFF) or
            (0x3400 <= cp <= 0x4DBF) or
            (0x20000 <= cp <= 0x2A6DF) or
            (0x2A700 <= cp <= 0x2B73F) or
            (0x2B740 <= cp <= 0x2B81F) or
            (0x2B820 <= cp <= 0x2CEAF) or
            (0xF900 <= cp <= 0xFAFF) or
            (0x2F800 <= cp <= 0x2FA1F)):
        return True
    return False

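# --- Illustration (not part of the deleted file): quick sanity checks for the
# --- two helpers above; the literals are arbitrary examples, not script values.
assert is_word('hello') and not is_word('Hello')             # lowercase ASCII only
assert _is_chinese_char('中') and not _is_chinese_char('a')  # U+4E2D is in the CJK block
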
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering.
    Args:
        logits: logits distribution, shape (vocabulary size,)
        top_k > 0: keep only the top k tokens with the highest probability (top-k filtering).
        top_p > 0.0: keep the smallest set of top tokens whose cumulative probability >= top_p (nucleus filtering).
            Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more, but the code would be less clear
    top_k = min(top_k, logits.size(-1))  # safety check
    if top_k > 0:
        # Remove all tokens with a probability less than the last token of the top-k
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value

    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

        # Remove tokens with cumulative probability above the threshold
        sorted_indices_to_remove = cumulative_probs > top_p
        # Shift the mask right so the first token above the threshold is kept as well
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0

        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value
    return logits

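# --- Illustration (not part of the deleted file): a minimal sketch of the
# --- filter with made-up logits, keeping only the two highest-scoring tokens.
example_logits = torch.tensor([1.0, 3.0, 2.0, 0.5])
filtered = top_k_top_p_filtering(example_logits.clone(), top_k=2)  # clone: the filter mutates its input
example_probs = F.softmax(filtered, dim=-1)  # all mass on indices 1 and 2; 0 and 3 are now -inf
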
def sample_sequence(model, context, length, n_ctx, tokenizer, temperature=1.0, top_k=30, top_p=0.0,
                    repetition_penalty=1.0, device='cpu'):
    context = torch.tensor(context, dtype=torch.long, device=device)
    context = context.unsqueeze(0)
    generated = context
    with torch.no_grad():
        for _ in trange(length):
            inputs = {'input_ids': generated[0][-(n_ctx - 1):].unsqueeze(0)}
            outputs = model(**inputs)  # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden states)
            next_token_logits = outputs[0][0, -1, :]
            # Penalize every token that has already been generated
            for token_id in set(generated[0].tolist()):
                next_token_logits[token_id] /= repetition_penalty
            next_token_logits = next_token_logits / temperature
            next_token_logits[tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf')  # never sample [UNK]
            filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
            next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
            generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
    return generated.tolist()[0]

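# --- Illustration (not part of the deleted file): the repetition penalty above
# --- simply divides the logit of each already-generated token. A quirk of this
# --- simple scheme: dividing a negative logit makes it larger, i.e. slightly
# --- MORE likely rather than less.
example_penalty = 1.2
assert 4.8 / example_penalty < 4.8     # a positive logit is pushed down
assert -4.8 / example_penalty > -4.8   # a negative logit is pushed up (the quirk)
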
def fast_sample_sequence(model, context, length, temperature=1.0, top_k=30, top_p=0.0, device='cpu'):
    inputs = torch.LongTensor(context).view(1, -1).to(device)
    if len(context) > 1:
        # Prime the key/value cache on everything but the last context token
        _, past = model(inputs[:, :-1], None)[:2]
        prev = inputs[:, -1].view(1, -1)
    else:
        past = None
        prev = inputs
    generate = [] + context
    with torch.no_grad():
        for i in trange(length):
            # Feed only the newest token; 'past' carries the cached hidden states
            # (this 'past' keyword matches older transformers releases)
            output = model(prev, past=past)
            output, past = output[:2]
            output = output[-1].squeeze(0) / temperature
            filtered_logits = top_k_top_p_filtering(output, top_k=top_k, top_p=top_p)
            next_token = torch.multinomial(torch.softmax(filtered_logits, dim=-1), num_samples=1)
            generate.append(next_token.item())
            prev = next_token.view(1, 1)
    return generate

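# --- Sketch (not part of the deleted file): on recent transformers releases
# --- (4.x) the 'past' keyword is named 'past_key_values'; the same cached
# --- decoding step would look roughly like this hypothetical helper.
def _cached_step(model, prev, past, temperature=1.0):
    out = model(prev, past_key_values=past, use_cache=True)
    return out.logits[0, -1, :] / temperature, out.past_key_values
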
# The mode is selected via the --fast_pattern command-line argument
def generate(n_ctx, model, context, length, tokenizer, temperature=1, top_k=0, top_p=0.0,
             repetition_penalty=1.0, device='cpu', is_fast_pattern=False):
    if is_fast_pattern:
        return fast_sample_sequence(model, context, length, temperature=temperature, top_k=top_k, top_p=top_p,
                                    device=device)
    else:
        return sample_sequence(model, context, length, n_ctx, tokenizer=tokenizer, temperature=temperature,
                               top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, device=device)

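# --- Illustration (not part of the deleted file): a hypothetical call to the
# --- dispatcher; model, context_tokens, tokenizer and device are assumed to be
# --- set up as in smp_generate() below.
#
#     ids = generate(n_ctx=1024, model=model, context=context_tokens, length=50,
#                    tokenizer=tokenizer, top_k=8, is_fast_pattern=True, device=device)
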
def smp_generate(pre_str):

    from tokenizations import tokenization_bert

    os.environ["CUDA_VISIBLE_DEVICES"] = '0,1,2,3'  # select which GPUs the program may use
    length = 500
    batch_size = 1
    nsamples = 1
    temperature = 1
    topk = 8
    topp = 0
    repetition_penalty = 1.0
    model_path = 'pretrained'
    tokenizer_path = 'cache/vocab.txt'
    save_samples = False     # retained from the original script; unused here
    save_samples_path = '.'  # retained from the original script; unused here
    fast_pattern = True
    prefix = pre_str

    device = "cuda" if torch.cuda.is_available() else "cpu"

    tokenizer = tokenization_bert.BertTokenizer(vocab_file=tokenizer_path)
    model = GPT2LMHeadModel.from_pretrained(model_path)
    model.to(device)
    model.eval()

    n_ctx = model.config.n_ctx

    if length == -1:
        length = model.config.n_ctx

    while True:
        raw_text = prefix
        context_tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(raw_text))
        generated = 0
        for _ in range(nsamples // batch_size):
            out = generate(
                n_ctx=n_ctx,
                model=model,
                context=context_tokens,
                length=length,
                is_fast_pattern=fast_pattern, tokenizer=tokenizer,
                temperature=temperature, top_k=topk, top_p=topp,
                repetition_penalty=repetition_penalty, device=device
            )
            for _ in range(batch_size):
                generated += 1
                text = tokenizer.convert_ids_to_tokens(out)
                for i, item in enumerate(text[:-1]):  # make sure adjacent English words are separated by a space
                    if is_word(item) and is_word(text[i + 1]):
                        text[i] = item + ' '
                for i, item in enumerate(text):
                    if item == '[MASK]':
                        text[i] = ''
                    elif item == '[CLS]':
                        text[i] = '\n\n'
                    elif item == '[SEP]':
                        text[i] = '\n'
                info = "=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40 + "\n"  # banner retained from the original script; unused here
                text = ''.join(text).replace('##', '').strip()
                return text  # returns the first sample, so the while-loop only runs once

if __name__ == '__main__':
    print(smp_generate('曹贼休走'))  # Chinese prompt, roughly "Halt, you traitor Cao!"