playing-code committed
Commit 848a62d
Parent: b7e1991

commit from shuqi

config.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "_name_or_path": "seed_encoder_3_decoder_layers",
+   "activation_dropout": 0.0,
+   "activation_fn": "gelu",
+   "adaptive_input": false,
+   "adaptive_softmax_cutoff": null,
+   "adaptive_softmax_dropout": 0,
+   "architectures": [
+     "SEEDEncoderDot_NLL_LN"
+   ],
+   "attention_dropout": 0.1,
+   "cross_self_attention": false,
+   "decoder_atten_window": 8,
+   "decoder_attention_heads": 12,
+   "decoder_embed_dim": 768,
+   "decoder_embed_path": null,
+   "decoder_ffn_embed_dim": 3072,
+   "decoder_input_dim": 768,
+   "decoder_layerdrop": 0,
+   "decoder_layers": 3,
+   "decoder_layers_to_keep": null,
+   "decoder_learned_pos": true,
+   "decoder_normalize_before": true,
+   "decoder_output_dim": 768,
+   "dropout": 0.1,
+   "encoder_attention_heads": 12,
+   "encoder_embed_dim": 768,
+   "encoder_ffn_embed_dim": 3072,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 12,
+   "encoder_layers_to_keep": null,
+   "finetuning_task": "msmarco",
+   "layernorm_embedding": true,
+   "max_positions": 512,
+   "max_source_positions": 512,
+   "max_target_positions": 512,
+   "model_type": "seed_encoder",
+   "no_cross_attention": false,
+   "no_scale_embedding": true,
+   "no_token_positional_embeddings": false,
+   "pad_token_id": 1,
+   "pooler_activation_fn": "tanh",
+   "pooler_dropout": 0.0,
+   "quant_noise_pq": 0.0,
+   "quant_noise_pq_block_size": 8,
+   "share_all_embeddings": true,
+   "share_decoder_input_output_embed": true,
+   "tie_adaptive_weights": true,
+   "train_ratio": "0.5:0.5",
+   "vocab_size": 32769
+ }
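
For reference, a minimal sketch of reading this config after the repo has been cloned locally. Since "seed_encoder" is a custom model_type that stock transformers does not register, the file is loaded with plain json rather than AutoConfig; the local path is an assumption.

import json

# Minimal sketch: inspect the added config.json (assumes the file sits in the
# current working directory after cloning the repo).
with open("config.json") as f:
    cfg = json.load(f)

print(cfg["architectures"])    # ['SEEDEncoderDot_NLL_LN']
print(cfg["encoder_layers"])   # 12 encoder layers
print(cfg["decoder_layers"])   # 3 decoder layers
print(cfg["vocab_size"])       # 32769
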
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbbd336ca67423d442bbf9983fa8a09210da62f33e352dd1287f1beeed0f4266
+ size 862084111
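
Because pytorch_model.bin is stored as a Git LFS pointer, a quick way to confirm that a downloaded copy matches this commit is to check it against the oid and size recorded above. A minimal sketch; the local file path is an assumption.

import hashlib
import os

# Values copied from the LFS pointer in this commit.
EXPECTED_SHA256 = "dbbd336ca67423d442bbf9983fa8a09210da62f33e352dd1287f1beeed0f4266"
EXPECTED_SIZE = 862084111  # bytes

path = "pytorch_model.bin"  # assumed local download location
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

# Hash the file in 1 MiB chunks to avoid loading ~860 MB into memory at once.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_SHA256, "checksum mismatch"
print("pytorch_model.bin matches the LFS pointer")
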
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "<mask>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "<mask>", "fb_model_kwargs": {}, "name_or_path": "seed_enoder_3_decoder_layers"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff
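
Taken together, vocab.txt, special_tokens_map.json, and tokenizer_config.json look like a BERT-style WordPiece setup (lowercasing, [CLS]/[SEP]/[PAD]/[UNK] tokens, plus a non-standard "<mask>" mask token). A hedged sketch of building a tokenizer directly from these files follows; using BertTokenizer is an assumption, since the custom seed_encoder model class is not needed just for tokenization.

from transformers import BertTokenizer

# Hedged sketch: assumes vocab.txt is a standard WordPiece vocabulary and that
# a BERT-style tokenizer reproduces the intended tokenization.
tokenizer = BertTokenizer(
    vocab_file="vocab.txt",
    do_lower_case=True,
    unk_token="[UNK]",
    sep_token="[SEP]",
    pad_token="[PAD]",
    cls_token="[CLS]",
    mask_token="<mask>",
)
print(len(tokenizer))  # should line up with config.json's vocab_size (32769)
print(tokenizer.tokenize("what is the capital of France?"))
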