wyu97 committed
Commit 2df49c8
1 Parent(s): fb3a0e1
Files changed (3)
  1. README.md +24 -0
  2. config.json +51 -0
  3. tokenizer.json +0 -0
README.md CHANGED
@@ -1,3 +1,27 @@
---
license: cc-by-4.0
---
+
+ # GenRead (MergeDPR): FiD model trained on TQA
+
+ -- This is the model checkpoint of GenRead [2], based on T5-3B and trained on TriviaQA [1] (see the loading sketch below).
+
+ -- Hyperparameters: 8 x 80GB A100 GPUs; batch size 16; AdamW; learning rate 5e-5; best dev performance at 9000 training steps
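+
+ The `architectures` field in config.json names `FiDT5`, a Fusion-in-Decoder wrapper that is not a stock `transformers` class, so the checkpoint is normally loaded through the FiD/GenRead codebase. Purely as a minimal illustrative sketch, assuming the weights are T5-compatible (key remapping may be needed); the model path and the `question:` input format below are assumptions, not part of this release:
+
+ ```python
+ # Minimal sketch (not the official loading path): treat the checkpoint as a
+ # plain T5 model via transformers. The model path is a placeholder.
+ from transformers import AutoTokenizer, T5ForConditionalGeneration
+
+ model_path = "path/to/this/checkpoint"  # hypothetical; replace with the actual repo id or local dir
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+ model = T5ForConditionalGeneration.from_pretrained(model_path)
+
+ text = "question: who wrote the novel Dracula?"  # assumed input format
+ inputs = tokenizer(text, return_tensors="pt")
+ outputs = model.generate(**inputs, max_length=32)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```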
+
+ References:
+
+ [1] TriviaQA: A Large Scale Dataset for Reading Comprehension and Question Answering. ACL 2017.
+
+ [2] Generate rather than Retrieve: Large Language Models are Strong Context Generators. arXiv 2022.
+
+ ## Model performance
+
+ We evaluate it on the TriviaQA dataset; the exact match (EM) score is 74.41.
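+
+ The EM score follows the standard open-domain QA convention: prediction and gold answers are normalized (lowercased, punctuation and articles stripped, whitespace collapsed), and a prediction counts as correct if it matches any gold alias. A minimal sketch of that scoring, written here for illustration rather than taken from the GenRead evaluation code:
+
+ ```python
+ # Sketch of SQuAD-style exact match (EM) scoring; illustrative only.
+ import re
+ import string
+
+ def normalize(text: str) -> str:
+     text = text.lower()
+     text = "".join(ch for ch in text if ch not in string.punctuation)
+     text = re.sub(r"\b(a|an|the)\b", " ", text)
+     return " ".join(text.split())
+
+ def exact_match(prediction: str, gold_answers: list[str]) -> bool:
+     return any(normalize(prediction) == normalize(g) for g in gold_answers)
+
+ # exact_match("The Eiffel Tower", ["Eiffel Tower"]) -> True
+ ```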
config.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "architectures": [
+     "FiDT5"
+   ],
+   "d_ff": 16384,
+   "d_kv": 128,
+   "d_model": 1024,
+   "decoder_start_token_id": 0,
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_heads": 32,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "vocab_size": 32128
+ }
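
The sizes in config.json (d_model 1024, d_ff 16384, d_kv 128, 24 layers, 32 heads, vocab 32128) correspond to the T5-3B scale; the `FiDT5` architecture name itself is assumed to come from the FiD/GenRead codebase rather than `transformers`. A minimal sketch of rebuilding an equivalent configuration object with `transformers`:

```python
# Sketch: an equivalent T5 configuration built from the values above.
from transformers import T5Config

config = T5Config(
    d_model=1024,
    d_ff=16384,
    d_kv=128,
    num_layers=24,
    num_heads=32,
    relative_attention_num_buckets=32,
    vocab_size=32128,
    dropout_rate=0.1,
    layer_norm_epsilon=1e-06,
    decoder_start_token_id=0,
    eos_token_id=1,
    pad_token_id=0,
)
print(config.model_type)  # "t5"
```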
tokenizer.json ADDED
The diff for this file is too large to render.