Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -32,9 +32,9 @@ Yunfan Shao, Zhichao Geng, Yitao Liu, Junqi Dai, Fei Yang, Li Zhe, Hujun Bao, Xi
  >>> from transformers import BertTokenizer
  >>> tokenizer = BertTokenizer.from_pretrained("fnlp/cpt-large")
  >>> model = CPTForConditionalGeneration.from_pretrained("fnlp/cpt-large")
- >>> inputs = tokenizer.encode("北京是[MASK]的首都", return_tensors='pt')
+ >>> input_ids = tokenizer.encode("北京是[MASK]的首都", return_tensors='pt')
  >>> pred_ids = model.generate(input_ids, num_beams=4, max_length=20)
  >>> print(tokenizer.convert_ids_to_tokens(pred_ids[i]))
- >>> print(tokenizer.convert_ids_to_tokens(pred_ids[i]))
+ >>> print(tokenizer.convert_ids_to_tokens(pred_ids[0]))
  ['[SEP]', '[CLS]', '北', '京', '是', '中', '国', '的', '首', '都', '[SEP]']
  ```
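
The two changes make the README example runnable as written: `inputs` is renamed to `input_ids`, the variable actually passed to `generate`, and `pred_ids[0]` replaces the undefined index `i`. For reference, a self-contained sketch of the corrected snippet follows; the `from modeling_cpt import ...` line is an assumption based on the fnlp/CPT repository layout, since `CPTForConditionalGeneration` is not shipped with the `transformers` package itself.

```python
from transformers import BertTokenizer
# Assumption: modeling_cpt.py from the fnlp/CPT repository is on the path;
# it provides CPTForConditionalGeneration.
from modeling_cpt import CPTForConditionalGeneration

tokenizer = BertTokenizer.from_pretrained("fnlp/cpt-large")
model = CPTForConditionalGeneration.from_pretrained("fnlp/cpt-large")

# Encode a masked Chinese sentence: "Beijing is the capital of [MASK]".
input_ids = tokenizer.encode("北京是[MASK]的首都", return_tensors='pt')

# Beam search fills in the masked span.
pred_ids = model.generate(input_ids, num_beams=4, max_length=20)

# Decode the first (and only) sequence in the batch.
print(tokenizer.convert_ids_to_tokens(pred_ids[0]))
```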