Ayham committed on
Commit 1ba99e0
1 Parent(s): bd7a32f

Update README.md

Files changed (1)
  1. README.md +12 -4
README.md CHANGED
@@ -24,28 +24,36 @@ Rouge2= 16.292

RougeL= 23.499
## Intended uses & limitations
+ To use its API:

- More information needed

- To use its API:
from transformers import BertTokenizerFast, GPT2Tokenizer, EncoderDecoderModel

+
model = EncoderDecoderModel.from_pretrained("Ayham/roberta_gpt2_summarization_cnn_dailymail")
- # reuse tokenizer from bert2bert encoder-decoder model
+
+
input_tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')


article = """Your Input Text"""

+
input_ids = input_tokenizer(article, return_tensors="pt").input_ids
+
+
output_ids = model.generate(input_ids)

- # we need a gpt2 tokenizer for the output word embeddings
+
output_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+
+
print(output_tokenizer.decode(output_ids[0], skip_special_tokens=True))

More information needed

+ More information needed
+
## Training procedure

### Training hyperparameters
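For reference, here is the usage snippet from the updated README assembled into a single runnable script. The comments restore the explanations this commit removed from the README (the tokenizer reuse and the GPT-2 decoding step); `article` remains the README's placeholder, and no generation arguments beyond the defaults are assumed.

```python
from transformers import BertTokenizerFast, GPT2Tokenizer, EncoderDecoderModel

# Load the encoder-decoder summarization checkpoint named in the README.
model = EncoderDecoderModel.from_pretrained(
    "Ayham/roberta_gpt2_summarization_cnn_dailymail"
)

# The README tokenizes inputs with bert-base-cased (a tokenizer reused from a
# bert2bert encoder-decoder setup, per the comment removed in this commit).
input_tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")

# Placeholder text; substitute the article you want to summarize.
article = """Your Input Text"""

input_ids = input_tokenizer(article, return_tensors="pt").input_ids
output_ids = model.generate(input_ids)

# Decode with a GPT-2 tokenizer, since the decoder emits GPT-2 vocabulary ids.
output_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
print(output_tokenizer.decode(output_ids[0], skip_special_tokens=True))
```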