patrickvonplaten (HF staff) committed
Commit 9b413d4
1 parent: 66c2f9c
config.json ADDED
@@ -0,0 +1,66 @@
+ {
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "architectures": [
+     "BartForConditionalGeneration"
+   ],
+   "attention_dilation": [
+     1,
+     1,
+     1,
+     1,
+     1,
+     1
+   ],
+   "attention_dropout": 0.0,
+   "attention_mode": "sliding_chunks",
+   "attention_probs_dropout_prob": 0.0,
+   "attention_window": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "autoregressive": false,
+   "bos_token_id": 0,
+   "classif_dropout": 0.0,
+   "d_model": 768,
+   "decoder_attention_heads": 12,
+   "decoder_ffn_dim": 3072,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "dropout": 0.1,
+   "encoder_attention_heads": 12,
+   "encoder_ffn_dim": 3072,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 6,
+   "eos_token_id": 2,
+   "extra_pos_embeddings": 2,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_decoder_position_embeddings": 1024,
+   "max_encoder_position_embeddings": 16384,
+   "model_type": "bart",
+   "normalize_before": false,
+   "normalize_embedding": true,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "scale_embedding": false,
+   "static_position_embeddings": false,
+   "vocab_size": 50265
+ }
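The config describes a BART-base-sized encoder-decoder whose encoder uses Longformer-style sliding-window attention (a 512-token window per layer) over up to 16384 positions, while the decoder keeps BART's 1024 positions. Below is a minimal sketch of inspecting the file, assuming it sits in the working directory; note that `attention_window`, `attention_mode`, `attention_dilation`, and `autoregressive` appear to be extras carried over from the long-attention conversion rather than standard `BartConfig` fields.

```python
import json

from transformers import BartConfig

# Raw view of the config.json added in this commit (path assumed).
with open("config.json") as f:
    cfg = json.load(f)

# Standard BART-base hyperparameters.
print(cfg["model_type"], cfg["d_model"], cfg["encoder_layers"], cfg["decoder_layers"])
# Long-attention settings: one sliding-window size per encoder layer.
print(cfg["attention_mode"], cfg["attention_window"])
# Encoder vs. decoder position limits (16384 vs. 1024).
print(cfg["max_encoder_position_embeddings"], cfg["max_decoder_position_embeddings"])

# Loading through transformers should also work; keys BartConfig does not
# define (attention_window, attention_mode, ...) are expected to be kept
# as plain extra attributes rather than interpreted.
config = BartConfig.from_json_file("config.json")
print(config.architectures, getattr(config, "attention_mode", None))
```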
merges.txt ADDED
The diff for this file is too large to render. See raw diff
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10fba4d32495379442691e194b65372eb9104b3cf0501b4c3bb3b4c3ca4c1829
+ size 605132379
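pytorch_model.bin is stored through Git LFS, so the diff shows only the pointer file: the spec version, the sha256 oid, and the size in bytes (roughly 605 MB). A small sketch of parsing such a pointer and checking a downloaded blob against it; file names are illustrative.

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(path: str) -> dict:
    """Split a git-lfs spec v1 pointer file into its key/value lines."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def matches_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a downloaded blob's size and sha256 with the pointer's records."""
    fields = parse_lfs_pointer(pointer_path)
    expected_sha = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])
    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False
    sha = hashlib.sha256()
    with blob.open("rb") as f:
        # Hash in 1 MiB chunks so the ~605 MB file is never fully in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == expected_sha
```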
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"model_max_length": 16384}
vocab.json ADDED
The diff for this file is too large to render. See raw diff