mgrbyte committed on
Commit 9ab7558
1 Parent(s): 6897bcf

Upload new model version

Files changed (3)
  1. config.json +38 -37
  2. generation_config.json +11 -11
  3. pytorch_model.bin +2 -2
config.json CHANGED
@@ -1,44 +1,45 @@
  {
- "activation_dropout":0.0,
- "activation_function":"swish",
- "architectures":[
+ "_name_or_path": "models/split_01/conversions/transformers",
+ "activation_dropout": 0.0,
+ "activation_function": "swish",
+ "architectures": [
  "MarianMTModel"
  ],
- "attention_dropout":0.0,
- "bad_words_ids":[
+ "attention_dropout": 0.0,
+ "bad_words_ids": [
  [
  50000
  ]
  ],
- "bos_token_id":0,
- "d_model":512,
- "decoder_attention_heads":8,
- "decoder_ffn_dim":2048,
- "decoder_layerdrop":0.0,
- "decoder_layers":6,
- "decoder_start_token_id":50000,
- "decoder_vocab_size":50001,
- "dropout":0.1,
- "encoder_attention_heads":8,
- "encoder_ffn_dim":2048,
- "encoder_layerdrop":0.0,
- "encoder_layers":6,
- "eos_token_id":0,
- "forced_eos_token_id":0,
- "init_std":0.02,
- "is_encoder_decoder":true,
- "max_length":512,
- "max_position_embeddings":512,
- "model_type":"marian",
- "normalize_embedding":false,
- "num_beams":6,
- "num_hidden_layers":6,
- "pad_token_id":50000,
- "scale_embedding":true,
- "share_encoder_decoder_embeddings":true,
- "static_position_embeddings":true,
- "torch_dtype":"float16",
- "transformers_version":"4.26.1",
- "use_cache":true,
- "vocab_size":50001
- }
+ "bos_token_id": 0,
+ "d_model": 512,
+ "decoder_attention_heads": 8,
+ "decoder_ffn_dim": 2048,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 6,
+ "decoder_start_token_id": 50000,
+ "decoder_vocab_size": 50001,
+ "dropout": 0.1,
+ "encoder_attention_heads": 8,
+ "encoder_ffn_dim": 2048,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 6,
+ "eos_token_id": 0,
+ "forced_eos_token_id": 0,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "max_length": 512,
+ "max_position_embeddings": 512,
+ "model_type": "marian",
+ "normalize_embedding": false,
+ "num_beams": 6,
+ "num_hidden_layers": 6,
+ "pad_token_id": 50000,
+ "scale_embedding": true,
+ "share_encoder_decoder_embeddings": true,
+ "static_position_embeddings": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.29.2",
+ "use_cache": true,
+ "vocab_size": 50001
+ }
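
The updated config.json keeps the same Marian architecture and mainly changes bookkeeping fields, notably `torch_dtype` (float16 to float32) and `transformers_version` (4.26.1 to 4.29.2). A minimal sketch of inspecting those fields with transformers, using a placeholder repo id since the actual model id is not shown in this commit view:

```python
# Hypothetical repo id for illustration only; substitute the real model id.
from transformers import AutoConfig

model_id = "mgrbyte/example-marian-model"  # placeholder, not the actual repo

config = AutoConfig.from_pretrained(model_id)
print(config.model_type)              # "marian"
print(config.torch_dtype)             # torch.float32 after this commit (was float16)
print(config.decoder_start_token_id)  # 50000, same value used as the pad token
```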
generation_config.json CHANGED
@@ -1,16 +1,16 @@
  {
- "_from_model_config":true,
- "bad_words_ids":[
+ "_from_model_config": true,
+ "bad_words_ids": [
  [
  50000
  ]
  ],
- "bos_token_id":0,
- "decoder_start_token_id":50000,
- "eos_token_id":0,
- "forced_eos_token_id":0,
- "max_length":512,
- "num_beams":6,
- "pad_token_id":50000,
- "transformers_version":"4.26.1"
- }
+ "bos_token_id": 0,
+ "decoder_start_token_id": 50000,
+ "eos_token_id": 0,
+ "forced_eos_token_id": 0,
+ "max_length": 512,
+ "num_beams": 6,
+ "pad_token_id": 50000,
+ "transformers_version": "4.29.2"
+ }
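
generation_config.json gets the same version bump (4.26.1 to 4.29.2) while leaving the decoding defaults unchanged. Those defaults are applied automatically by `generate()`; a small sketch under the same placeholder-repo assumption as above:

```python
# Placeholder repo id again; the real model id is not part of this diff.
from transformers import MarianMTModel, MarianTokenizer

model_id = "mgrbyte/example-marian-model"

tokenizer = MarianTokenizer.from_pretrained(model_id)
model = MarianMTModel.from_pretrained(model_id)

inputs = tokenizer("Example source sentence.", return_tensors="pt")

# With no overrides, generate() applies the repo's generation_config.json:
# num_beams=6, max_length=512, pad/decoder_start token id 50000, etc.
outputs = model.generate(**inputs)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```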
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5d03e34d383709a7ebbaf203f528dfcd03f329b187542ae0873665284eda6888
- size 190862153
+ oid sha256:2ab0d59f59210d9797e036cab04f4a7573abc3838bccccc21ba5fdad012fe77a
+ size 279239426
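
pytorch_model.bin is tracked with Git LFS, so the repo stores only a pointer containing the blob's sha256 and byte size; the new pointer values above can be used to sanity-check a locally downloaded copy. A sketch, assuming the weights have already been fetched (e.g. via `git lfs pull` or the Hugging Face hub client):

```python
# Verify a downloaded pytorch_model.bin against the LFS pointer in this commit.
import hashlib
from pathlib import Path

EXPECTED_SHA256 = "2ab0d59f59210d9797e036cab04f4a7573abc3838bccccc21ba5fdad012fe77a"
EXPECTED_SIZE = 279239426  # bytes, per the updated pointer

path = Path("pytorch_model.bin")  # local path to the fetched weights

h = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

print(path.stat().st_size == EXPECTED_SIZE)  # True if the full file was downloaded
print(h.hexdigest() == EXPECTED_SHA256)      # True if the content matches the pointer
```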