zizzimars committed
Commit f2b28ad
1 Parent(s): e9e7562

End of training

Files changed (3)
  1. README.md +80 -0
  2. generation_config.json +9 -0
  3. model.safetensors +1 -1
README.md ADDED
@@ -0,0 +1,80 @@
+ ---
+ license: mit
+ base_model: microsoft/speecht5_tts
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: speecht5_finetuned_kazakh_tts2_1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # speecht5_finetuned_kazakh_tts2_1
+
+ This model is a fine-tuned version of [microsoft/speecht5_tts](https://huggingface.co/microsoft/speecht5_tts) on an unspecified dataset (the Trainer recorded no dataset name).
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4800
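
A minimal inference sketch, assuming the checkpoint is published on the Hub as `zizzimars/speecht5_finetuned_kazakh_tts2_1` and paired with the stock `microsoft/speecht5_hifigan` vocoder; the zero speaker embedding below is only a placeholder and should be replaced with an x-vector for the target Kazakh voice:

```python
import torch
import soundfile as sf
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

# Load the fine-tuned checkpoint together with its processor and a HiFi-GAN vocoder.
processor = SpeechT5Processor.from_pretrained("zizzimars/speecht5_finetuned_kazakh_tts2_1")
model = SpeechT5ForTextToSpeech.from_pretrained("zizzimars/speecht5_finetuned_kazakh_tts2_1")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Salem, alem!", return_tensors="pt")

# SpeechT5 conditions on a 512-dim speaker x-vector; zeros are only a placeholder.
speaker_embeddings = torch.zeros((1, 512))

# Generate a mel spectrogram and vocode it to a 16 kHz waveform.
speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
sf.write("speech.wav", speech.numpy(), samplerate=16000)
```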
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (see the sketch after this list):
+ - learning_rate: 1e-05
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 64
+ - optimizer: Adam with betas=(0.9, 0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 200
+ - training_steps: 2000
+ - mixed_precision_training: Native AMP
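
A hedged reconstruction of these values as `Seq2SeqTrainingArguments`; the `output_dir` and the 100-step eval cadence are assumptions (the cadence matches the results table below), everything else comes from the list above:

```python
from transformers import Seq2SeqTrainingArguments

# Reconstruction of the hyperparameter list above; output_dir is hypothetical.
training_args = Seq2SeqTrainingArguments(
    output_dir="speecht5_finetuned_kazakh_tts2_1",  # assumed, not recorded in the card
    learning_rate=1e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    seed=42,
    gradient_accumulation_steps=16,  # 4 * 16 = effective train batch size of 64
    lr_scheduler_type="linear",
    warmup_steps=200,
    max_steps=2000,
    fp16=True,                       # "Native AMP" mixed precision
    evaluation_strategy="steps",
    eval_steps=100,                  # assumed from the 100-step rows in the results table
)
```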
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 0.725         | 0.06  | 100  | 0.6639          |
+ | 0.6132        | 0.11  | 200  | 0.5466          |
+ | 0.571         | 0.17  | 300  | 0.5207          |
+ | 0.5647        | 0.22  | 400  | 0.5120          |
+ | 0.5556        | 0.28  | 500  | 0.5047          |
+ | 0.5475        | 0.34  | 600  | 0.5003          |
+ | 0.5432        | 0.39  | 700  | 0.4975          |
+ | 0.5366        | 0.45  | 800  | 0.4944          |
+ | 0.5376        | 0.5   | 900  | 0.4913          |
+ | 0.5325        | 0.56  | 1000 | 0.4868          |
+ | 0.5281        | 0.62  | 1100 | 0.4861          |
+ | 0.5288        | 0.67  | 1200 | 0.4848          |
+ | 0.5251        | 0.73  | 1300 | 0.4825          |
+ | 0.5213        | 0.78  | 1400 | 0.4818          |
+ | 0.5225        | 0.84  | 1500 | 0.4823          |
+ | 0.5199        | 0.9   | 1600 | 0.4812          |
+ | 0.5211        | 0.95  | 1700 | 0.4816          |
+ | 0.5194        | 1.01  | 1800 | 0.4826          |
+ | 0.5224        | 1.06  | 1900 | 0.4798          |
+ | 0.5213        | 1.12  | 2000 | 0.4800          |
+
+
+ ### Framework versions
+
+ - Transformers 4.38.1
+ - Pytorch 2.2.1+cu118
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "decoder_start_token_id": 2,
+   "eos_token_id": 2,
+   "max_length": 1876,
+   "pad_token_id": 1,
+   "transformers_version": "4.38.1"
+ }
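
These defaults travel with the checkpoint and can be inspected directly; a small sketch, with the repo id assumed as above:

```python
from transformers import GenerationConfig

# Load the generation defaults committed in generation_config.json.
gen_config = GenerationConfig.from_pretrained("zizzimars/speecht5_finetuned_kazakh_tts2_1")
print(gen_config.max_length)  # 1876 -- the decoder's maximum generation length
```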
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fd54333eb368d18b60c7f2d727eaa61d57d9872051f728da88979465e54defc0
+ oid sha256:693d05c613efb07ea946efb3f68b76c2fc800dce1a7a35ba1bb1dec435b098cb
  size 577789320