Xenova (HF staff) committed
Commit ec628b9
Parent(s): 7560ecd

Upload folder using huggingface_hub

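A commit with this message is typically produced by huggingface_hub's upload_folder. A minimal sketch of that flow; the repo id and local folder are assumptions for illustration, not taken from the commit:

    # Sketch: upload an exported model folder to the Hub with huggingface_hub.
    # Repo id and folder path below are hypothetical.
    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_folder(
        folder_path="./distilbart-cnn-6-6",
        repo_id="Xenova/distilbart-cnn-6-6",
        commit_message="Upload folder using huggingface_hub",
    )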
config.json CHANGED
@@ -70,7 +70,7 @@
       "num_beams": 4
     }
   },
-  "transformers_version": "4.26.1",
+  "transformers_version": "4.29.2",
   "use_cache": true,
   "vocab_size": 50264
 }
generation_config.json ADDED
@@ -0,0 +1,16 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 0,
+  "decoder_start_token_id": 2,
+  "early_stopping": true,
+  "eos_token_id": 2,
+  "forced_bos_token_id": 0,
+  "forced_eos_token_id": 2,
+  "length_penalty": 2.0,
+  "max_length": 142,
+  "min_length": 56,
+  "no_repeat_ngram_size": 3,
+  "num_beams": 4,
+  "pad_token_id": 1,
+  "transformers_version": "4.29.2"
+}
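The new generation_config.json moves the summarization defaults (4-beam search, length penalty 2.0, output clamped to 56–142 tokens) out of config.json into the dedicated file newer transformers versions expect. A sketch of reading it back, assuming the repo id:

    # Sketch: load the generation defaults added above (repo id is an assumption).
    from transformers import GenerationConfig

    gen_config = GenerationConfig.from_pretrained("Xenova/distilbart-cnn-6-6")
    print(gen_config.num_beams)       # 4
    print(gen_config.length_penalty)  # 2.0
    print(gen_config.min_length, gen_config.max_length)  # 56 142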
merges.txt CHANGED
@@ -1,4 +1,4 @@
-#version: 0.2 - Trained by `huggingface/tokenizers`
+#version: 0.2
 Ġ t
 Ġ a
 h e
onnx/decoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77ad751280773fe931bfd719c9f16d136b7a3b03bf8ab4f7500cb73cba380a65
+size 819558619
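The .onnx entries are Git LFS pointers: the repository tracks only a spec version, a sha256 object id, and the byte size, while the weights themselves live in LFS storage. A sketch of verifying a downloaded file against its pointer:

    # Sketch: check a downloaded ONNX file against the sha256 in its LFS pointer.
    import hashlib

    h = hashlib.sha256()
    with open("onnx/decoder_model.onnx", "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    print(h.hexdigest() == "77ad751280773fe931bfd719c9f16d136b7a3b03bf8ab4f7500cb73cba380a65")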
onnx/decoder_model_merged.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c8e72f774bf8a91defdc5f63bfffb12bd3884e811b7859b21ccc80692ada7d9
+size 819782600
onnx/decoder_model_merged_quantized.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a16e203dd8f5e7f5be4c2452110669d94c9f1bcdaff7185b6e3e006d3debf01
-size 206659464
+oid sha256:a9015adf6852d623044d63b1d2f0e1b22e98889afc5508cdffa532f63d05be49
+size 206112798
onnx/decoder_model_quantized.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf78c246385eb43eedd09db3727275eb2ea890c8f80df7600c22871f9fa63d49
-size 206522287
+oid sha256:fa0a59e308e6f10df1a8829b6beafac12fa1b9a50cade5255c382c15cb9541fa
+size 205756050
onnx/decoder_with_past_model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c8197cf1d02b2e64f77fb70fbe4b15d6f1b654feb839cc0484103c41b9efb61
+size 769135510
onnx/decoder_with_past_model_quantized.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:687fcc78d2b81908b5dc60089557bd461c492e2e2ffffc982d86a0f90c4545ae
-size 193754572
+oid sha256:b8dab3abe6b4b846e223d531ee426186c7808044c8b6a04df4e4d40a456bf152
+size 193054589
onnx/encoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ce28b2b7a9948ed8cdb414ff5e4037cfa5f54ae16e9797b3772d651904e293c
+size 512520582
onnx/encoder_model_quantized.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8820a208f45059db2cbcd5b7b59ff3829e946789420058244cc8f429ef707fab
-size 128853768
+oid sha256:ffb6c0547b82b923abff346f03b2f0d75489bd71bdacc2fb773a9dc5d94b7d68
+size 128543065
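The quantized graphs run directly under onnxruntime. A sketch of a single encoder forward pass; the input names follow the usual seq2seq export convention (input_ids, attention_mask) and are an assumption here:

    # Sketch: run the quantized encoder with onnxruntime (input names assumed).
    import numpy as np
    import onnxruntime as ort

    session = ort.InferenceSession("onnx/encoder_model_quantized.onnx")
    input_ids = np.array([[0, 31414, 232, 2]], dtype=np.int64)  # hypothetical token ids
    outputs = session.run(None, {
        "input_ids": input_ids,
        "attention_mask": np.ones_like(input_ids),
    })
    print(outputs[0].shape)  # (batch, seq_len, hidden_size)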
tokenizer.json CHANGED
@@ -5,55 +5,56 @@
   "added_tokens": [
     {
       "id": 0,
-      "special": true,
       "content": "<s>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true
+      "normalized": true,
+      "special": true
     },
     {
       "id": 1,
-      "special": true,
       "content": "<pad>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true
+      "normalized": true,
+      "special": true
     },
     {
       "id": 2,
-      "special": true,
       "content": "</s>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true
+      "normalized": true,
+      "special": true
     },
     {
       "id": 3,
-      "special": true,
       "content": "<unk>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true
+      "normalized": true,
+      "special": true
     },
     {
       "id": 50264,
-      "special": true,
       "content": "<mask>",
       "single_word": false,
       "lstrip": true,
       "rstrip": false,
-      "normalized": true
+      "normalized": true,
+      "special": true
     }
   ],
   "normalizer": null,
   "pre_tokenizer": {
     "type": "ByteLevel",
     "add_prefix_space": false,
-    "trim_offsets": true
+    "trim_offsets": true,
+    "use_regex": true
   },
   "post_processor": {
     "type": "RobertaProcessing",
@@ -71,7 +72,8 @@
   "decoder": {
     "type": "ByteLevel",
     "add_prefix_space": true,
-    "trim_offsets": true
+    "trim_offsets": true,
+    "use_regex": true
   },
   "model": {
     "type": "BPE",
@@ -80,6 +82,7 @@
     "continuing_subword_prefix": "",
     "end_of_word_suffix": "",
     "fuse_unk": false,
+    "byte_fallback": false,
     "vocab": {
       "<s>": 0,
       "<pad>": 1,
tokenizer_config.json CHANGED
@@ -8,6 +8,7 @@
     "rstrip": false,
     "single_word": false
   },
+  "clean_up_tokenization_spaces": true,
   "cls_token": {
     "__type": "AddedToken",
     "content": "<s>",
@@ -34,7 +35,6 @@
     "single_word": false
   },
   "model_max_length": 1024,
-  "name_or_path": "sshleifer/distilbart-cnn-6-6",
   "pad_token": {
     "__type": "AddedToken",
     "content": "<pad>",
@@ -51,7 +51,6 @@
     "rstrip": false,
     "single_word": false
   },
-  "special_tokens_map_file": null,
   "tokenizer_class": "BartTokenizer",
   "trim_offsets": true,
   "unk_token": {