supermy committed
Commit 478b9ca
1 Parent(s): d933d45

Update app.py

Files changed (1):
  app.py +14 -3
app.py CHANGED
@@ -9,14 +9,21 @@ tokenizer = BertTokenizer.from_pretrained("supermy/couplet-gpt2")
 model = GPT2LMHeadModel.from_pretrained("supermy/couplet-gpt2")
 model.eval()
 
+# top_k / top_p decoding strategy: keep only the top_k tokens, or the tokens whose cumulative probability reaches top_p; every other token is set to filter_value (negative infinity) so it cannot be picked during the later token-selection step.
+# In other words, carve the highest-probability candidates out of the model's output logits.
 def top_k_top_p_filtering( logits, top_k=0, top_p=0.0, filter_value=-float('Inf') ):
+    # Make sure the logits come in as a one-dimensional tensor (a single row), which keeps the handling simple.
     assert logits.dim() == 1
+    # Clamp top_k to the smaller of the configured top_k and the number of logits.
     top_k = min( top_k, logits.size(-1) )
     if top_k > 0:
+        # Drop every token whose logit is smaller than that of the last (smallest) token inside the top_k.
+        # torch.topk() returns the top_k largest elements along the last dimension as a pair (values, indices).
+        # "..." lets the remaining dimensions be inferred automatically.
         indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
-        logits[indices_to_remove] = filter_value
+        logits[indices_to_remove] = filter_value  # set the logits of everything outside the top_k to negative infinity
     if top_p > 0.0:
-        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+        sorted_logits, sorted_indices = torch.sort(logits, descending=True)  # sort the logits in descending order
         cumulative_probs = torch.cumsum( F.softmax(sorted_logits, dim=-1), dim=-1 )
         sorted_indices_to_remove = cumulative_probs > top_p
         sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
@@ -30,13 +37,17 @@ def generate(input_text):
     input_ids.extend( tokenizer.encode(input_text + "-", add_special_tokens=False) )
     input_ids = torch.tensor( [input_ids] )
 
+    # Generate at most max_len tokens.
     generated = []
     for _ in range(100):
         output = model(input_ids)
-
+        # For every token already in the generated result, a repetition penalty is added to lower its probability of being generated again.
         next_token_logits = output.logits[0, -1, :]
+        # Set the logit of [UNK] to negative infinity, so the model's prediction can never be the [UNK] token.
         next_token_logits[ tokenizer.convert_tokens_to_ids('[UNK]') ] = -float('Inf')
+        # Use top_k_top_p_filtering to screen the predictions according to the top_k and top_p values.
         filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=8, top_p=1)
+        # torch.multinomial draws num_samples elements from the candidate set without replacement; the higher the weight, the more likely an element is drawn; it returns the element's index.
         next_token = torch.multinomial( F.softmax(filtered_logits, dim=-1), num_samples=1 )
         if next_token == tokenizer.sep_token_id:
             break
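
The first hunk ends before top_k_top_p_filtering is finished: the shifted nucleus mask is never shown being mapped back to vocabulary order, and nothing is returned. Below is a minimal self-contained sketch of the whole filter, assuming the truncated remainder follows the standard top-k/top-p recipe (keep the first token that crosses the threshold, scatter the mask back through sorted_indices, return the masked logits); the tiny demo at the end is illustrative only.

import torch
import torch.nn.functional as F

def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    # Keep only the top_k highest logits and/or the smallest set of tokens whose
    # cumulative probability exceeds top_p; everything else becomes filter_value.
    assert logits.dim() == 1
    top_k = min(top_k, logits.size(-1))
    if top_k > 0:
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        sorted_indices_to_remove = cumulative_probs > top_p
        # Shift the mask right so the first token above the threshold is still kept.
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        # Map the mask from sorted order back to the original vocabulary positions.
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value
    return logits

# Tiny demo: with top_k=2 only the two largest logits survive.
demo = torch.tensor([1.0, 3.0, 0.5, 2.0])
print(top_k_top_p_filtering(demo.clone(), top_k=2))  # tensor([-inf, 3., -inf, 2.])

Note that with the values used in generate, top_k=8 and top_p=1, the top_p branch has essentially no effect (the cumulative probability only reaches 1 at the very last token), so in practice only the eight most likely characters remain candidates at each step.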
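
The second hunk stops at the break, so how the sampled token is fed back into input_ids and how the finished couplet is decoded are not visible in this commit. The sketch below shows one plausible end-to-end version of generate, reusing the top_k_top_p_filtering sketch above; the [CLS] initialization of input_ids, the torch.cat feedback step, and the final convert_ids_to_tokens decoding are assumptions, not code from this repository.

import torch
import torch.nn.functional as F
from transformers import BertTokenizer, GPT2LMHeadModel

tokenizer = BertTokenizer.from_pretrained("supermy/couplet-gpt2")
model = GPT2LMHeadModel.from_pretrained("supermy/couplet-gpt2")
model.eval()

def generate(input_text, max_len=100):
    # Encode the first line of the couplet plus the "-" separator, as in the hunk above.
    # Starting from [CLS] is an assumption; the initialization sits outside the hunk.
    input_ids = [tokenizer.cls_token_id]
    input_ids.extend(tokenizer.encode(input_text + "-", add_special_tokens=False))
    input_ids = torch.tensor([input_ids])

    generated = []
    with torch.no_grad():
        for _ in range(max_len):
            output = model(input_ids)
            next_token_logits = output.logits[0, -1, :]
            # Forbid [UNK] by pushing its logit to negative infinity.
            next_token_logits[tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf')
            filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=8, top_p=1)
            # Sample one token id from the filtered distribution.
            next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
            if next_token == tokenizer.sep_token_id:
                break
            generated.append(next_token.item())
            # Feed the sampled token back in for the next step.
            input_ids = torch.cat((input_ids, next_token.unsqueeze(0)), dim=1)
    return "".join(tokenizer.convert_ids_to_tokens(generated))

print(generate("春风送暖入屠苏"))  # arbitrary example first line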