hongjing0312 committed
Training in progress, step 1000

Changed files:
- config.json +1 -1
- pytorch_model.bin +2 -2
- tokenizer_config.json +0 -1
- training_args.bin +2 -2
config.json CHANGED
@@ -85,7 +85,7 @@
   "speech_decoder_prenet_layers": 2,
   "speech_decoder_prenet_units": 256,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.32.1",
   "use_cache": false,
   "use_guided_attention_loss": true,
   "vocab_size": 81
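The only config change is the pinned transformers_version, now 4.32.1. A minimal sketch of loading this checkpoint with that version; the repo id below is hypothetical, since the actual repository name is not shown on this page:

# Assumes transformers >= 4.32.1 and a hypothetical repo id.
from transformers import SpeechT5ForTextToSpeech, SpeechT5Processor

repo_id = "hongjing0312/speecht5_finetuned"  # hypothetical; substitute the real repo

processor = SpeechT5Processor.from_pretrained(repo_id)
model = SpeechT5ForTextToSpeech.from_pretrained(repo_id)  # reads config.json + pytorch_model.bin
print(model.config.transformers_version)  # "4.32.1" after this commit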
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:93e5a5f37ace73955c35f4e0b967a935ba84603142e6deac6ac25d65748542b1
+size 577878778
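What the repo actually versions is a small Git LFS pointer (version, oid, size); the new weights blob itself is 577,878,778 bytes (~551 MiB) with the sha256 above. A sketch, assuming the file has been downloaded locally, of checking a copy against this pointer:

# Verify a local pytorch_model.bin against the LFS pointer in this commit.
import hashlib

expected_oid = "93e5a5f37ace73955c35f4e0b967a935ba84603142e6deac6ac25d65748542b1"
expected_size = 577878778  # bytes

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:  # assumed local download path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected_oid, "sha256 mismatch"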
tokenizer_config.json CHANGED
@@ -3,7 +3,6 @@
   "clean_up_tokenization_spaces": true,
   "eos_token": "</s>",
   "model_max_length": 600,
-  "normalize": false,
   "pad_token": "<pad>",
   "processor_class": "SpeechT5Processor",
   "sp_model_kwargs": {},
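This commit drops the explicit "normalize": false, so a loaded tokenizer falls back to SpeechT5Tokenizer's built-in default for that flag rather than a value pinned in the config. A sketch, assuming transformers 4.32.1 (where the flag controls text normalization such as spelling out numbers) and the same hypothetical repo id as above:

from transformers import SpeechT5Tokenizer

repo_id = "hongjing0312/speecht5_finetuned"  # hypothetical
tok = SpeechT5Tokenizer.from_pretrained(repo_id)
print(tok.normalize)  # class default, since the saved config no longer sets it

# An explicit override at load time still works:
tok = SpeechT5Tokenizer.from_pretrained(repo_id, normalize=False)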
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:923dc5caa4ebf32836d58c714b261dba448a1f77e612b4be61531ab220ef0d5e
+size 4664
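training_args.bin is the small (4,664-byte) pickled TrainingArguments object that the Trainer saves alongside each checkpoint. A sketch of inspecting it after downloading, assuming a local copy:

import torch

# Not a tensor file, so newer torch needs weights_only=False to unpickle it.
args = torch.load("training_args.bin", weights_only=False)  # assumed local path
print(args.learning_rate, args.max_steps, args.per_device_train_batch_size)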