ReaderBench committed on
Commit
fb77fbb
1 Parent(s): 15b9d4a

update how to use

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -18,19 +18,19 @@ For code and evaluation check out [GitHub](https://github.com/readerbench/RoGPT2
18
 
19
  ```python
20
  # TensorFlow
21
- from transformers import AutoTokenizer, TFAutoModelWithLMHead
22
 
23
  tokenizer = AutoTokenizer.from_pretrained('readerbench/RoGPT2-medium')
24
- model = TFAutoModelWithLMHead.from_pretrained('readerbench/RoGPT2-medium')
25
  inputs = tokenizer.encode("Este o zi de vara", return_tensors='tf')
26
  text = model.generate(inputs, max_length=1024, no_repeat_ngram_size=2)
27
  print(tokenizer.decode(text[0]))
28
 
29
  # PyTorch
30
- from transformers import AutoTokenizer, AutoModelWithLMHead
31
 
32
  tokenizer = AutoTokenizer.from_pretrained('readerbench/RoGPT2-medium')
33
- model = AutoModelWithLMHead.from_pretrained('readerbench/RoGPT2-medium')
34
  inputs = tokenizer.encode("Este o zi de vara", return_tensors='pt')
35
  text = model.generate(inputs, max_length=1024, no_repeat_ngram_size=2)
36
  print(tokenizer.decode(text[0]))
18
 
19
  ```python
20
  # TensorFlow
21
+ from transformers import AutoTokenizer, TFAutoModelForCausalLM
22
 
23
  tokenizer = AutoTokenizer.from_pretrained('readerbench/RoGPT2-medium')
24
+ model = TFAutoModelForCausalLM.from_pretrained('readerbench/RoGPT2-medium')
25
  inputs = tokenizer.encode("Este o zi de vara", return_tensors='tf')
26
  text = model.generate(inputs, max_length=1024, no_repeat_ngram_size=2)
27
  print(tokenizer.decode(text[0]))
28
 
29
  # PyTorch
30
+ from transformers import AutoTokenizer, AutoModelForCausalLM
31
 
32
  tokenizer = AutoTokenizer.from_pretrained('readerbench/RoGPT2-medium')
33
+ model = AutoModelForCausalLM.from_pretrained('readerbench/RoGPT2-medium')
34
  inputs = tokenizer.encode("Este o zi de vara", return_tensors='pt')
35
  text = model.generate(inputs, max_length=1024, no_repeat_ngram_size=2)
36
  print(tokenizer.decode(text[0]))