Ayham committed on
Commit
d315f9f
1 Parent(s): 1ba99e0

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -11
README.md CHANGED
@@ -26,28 +26,20 @@ RougeL= 23.499
26
  ## Intended uses & limitations
27
  To use its API:
28
 
29
-
30
- from transformers import BertTokenizerFast, GPT2Tokenizer, EncoderDecoderModel
31
-
32
 
33
  model = EncoderDecoderModel.from_pretrained("Ayham/roberta_gpt2_summarization_cnn_dailymail")
34
 
 
35
 
36
- input_tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')
37
-
38
 
39
  article = """Your Input Text"""
40
 
41
-
42
  input_ids = input_tokenizer(article, return_tensors="pt").input_ids
43
 
44
-
45
  output_ids = model.generate(input_ids)
46
 
47
-
48
- output_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
49
-
50
-
51
  print(output_tokenizer.decode(output_ids[0], skip_special_tokens=True))
52
 
53
  More information needed
 
26
  ## Intended uses & limitations
27
  To use its API:
28
 
29
+ from transformers import RobertaTokenizerFast, GPT2Tokenizer, EncoderDecoderModel
 
 
30
 
31
  model = EncoderDecoderModel.from_pretrained("Ayham/roberta_gpt2_summarization_cnn_dailymail")
32
 
33
+ input_tokenizer = RobertaTokenizerFast.from_pretrained('roberta-base')
34
 
35
+ output_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
 
36
 
37
  article = """Your Input Text"""
38
 
 
39
  input_ids = input_tokenizer(article, return_tensors="pt").input_ids
40
 
 
41
  output_ids = model.generate(input_ids)
42
 
 
 
 
 
43
  print(output_tokenizer.decode(output_ids[0], skip_special_tokens=True))
44
 
45
  More information needed