conan1024hao committed on
Commit 979f8e6
Parent: b243c3f

update readme

Files changed (1)
README.md +2 -2
README.md CHANGED
@@ -6,7 +6,7 @@ datasets:
 - cc100
 mask_token: "[MASK]"
 widget:
-- text: "早稲田 大学 で 自然 言語 処理 を [MASK] する 。"
+- text: "早稲田大学で自然言語処理を[MASK]する。"
 ---
 
 # nlp-waseda/roberta-base-japanese-with-auto-jumanpp
@@ -23,7 +23,7 @@ from transformers import AutoTokenizer, AutoModelForMaskedLM
 tokenizer = AutoTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
 model = AutoModelForMaskedLM.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
 
-sentence = '早稲田 大学 で 自然 言語 処理 を [MASK] する 。'
+sentence = '早稲田大学で自然言語処理を[MASK]する。'
 encoding = tokenizer(sentence, return_tensors='pt')
 ...
 ```
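
The updated README snippet still ends with an ellipsis where the rest of the example is elided. As a minimal sketch (not part of the model card itself), one way it might be completed is shown below, assuming `torch` is installed along with the Juman++ dependencies the auto-jumanpp tokenizer relies on; the top-k decoding at the end is purely illustrative.

```python
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

# Same model and tokenizer as in the README snippet.
tokenizer = AutoTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
model = AutoModelForMaskedLM.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")

# After this commit the sentence is raw text; the tokenizer is expected to
# run Juman++ segmentation itself, so no manual pre-segmentation is needed.
sentence = '早稲田大学で自然言語処理を[MASK]する。'
encoding = tokenizer(sentence, return_tensors='pt')

with torch.no_grad():
    output = model(**encoding)

# Illustrative decoding: show the top-5 candidate tokens for the [MASK] slot.
mask_positions = (encoding['input_ids'][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
top_ids = output.logits[0, mask_positions[0]].topk(5).indices
print(tokenizer.convert_ids_to_tokens(top_ids.tolist()))
```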