ReaderBench committed on
Commit 0a955f1
1 Parent(s): e60b3cd

update how to use

Files changed (1)
README.md +3 -3
README.md CHANGED
@@ -18,16 +18,16 @@ For code and evaluation check out [GitHub](https://github.com/readerbench/RoGPT2)
 
 ```python
 # TensorFlow
-from transformers import AutoTokenizer, TFAutoModelWithLMHead
+from transformers import AutoTokenizer, TFAutoModelForCausalLM
 
 tokenizer = AutoTokenizer.from_pretrained('readerbench/RoGPT2-large')
-model = TFAutoModelWithLMHead.from_pretrained('readerbench/RoGPT2-large')
+model = TFAutoModelForCausalLM.from_pretrained('readerbench/RoGPT2-large')
 inputs = tokenizer.encode("Este o zi de vara", return_tensors='tf')
 text = model.generate(inputs, max_length=1024, no_repeat_ngram_size=2)
 print(tokenizer.decode(text[0]))
 
 # PyTorch
-from transformers import AutoTokenizer, AutoModelWithLMHead
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
 tokenizer = AutoTokenizer.from_pretrained('readerbench/RoGPT2-large')
 model = AutoModelForCausalLM.from_pretrained('readerbench/RoGPT2-large')
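
For reference, the commit swaps the deprecated `AutoModelWithLMHead` / `TFAutoModelWithLMHead` classes for the causal-LM specific `AutoModelForCausalLM` / `TFAutoModelForCausalLM`. Below is a minimal runnable sketch of the updated PyTorch usage, assuming the `transformers` and `torch` packages are installed and the `readerbench/RoGPT2-large` checkpoint can be downloaded from the Hugging Face Hub; the prompt and generation parameters are taken directly from the README.

```python
# Minimal sketch of the updated README usage (PyTorch variant).
# Assumes `transformers` and `torch` are installed and the
# readerbench/RoGPT2-large checkpoint is reachable on the Hub.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained('readerbench/RoGPT2-large')
model = AutoModelForCausalLM.from_pretrained('readerbench/RoGPT2-large')

# Romanian prompt from the README: "Este o zi de vara" ("It is a summer day")
inputs = tokenizer.encode("Este o zi de vara", return_tensors='pt')

# Generate up to 1024 tokens, disallowing repeated 2-grams
text = model.generate(inputs, max_length=1024, no_repeat_ngram_size=2)
print(tokenizer.decode(text[0]))
```

The TensorFlow snippet in the diff is identical apart from using `TFAutoModelForCausalLM` and `return_tensors='tf'`.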