23-04-17 14:12:03.719 - INFO:   name: louise
  model: extensibletrainer
  scale: 1
  gpu_ids: [0]
  start_step: 0
  checkpointing_enabled: True
  fp16: True
  bitsandbytes: True
  gpus: 1
  datasets:[
    train:[
      name: training
      n_workers: 2
      batch_size: 28
      mode: paired_voice_audio
      path: ./training/louise/train.txt
      fetcher_mode: ['lj']
      phase: train
      max_wav_length: 255995
      max_text_length: 200
      sample_rate: 22050
      load_conditioning: True
      num_conditioning_candidates: 2
      conditioning_length: 44000
      use_bpe_tokenizer: True
      tokenizer_vocab: ./modules/tortoise-tts/tortoise/data/tokenizer.json
      load_aligned_codes: False
      data_type: img
    ]
    val:[
      name: validation
      n_workers: 2
      batch_size: 7
      mode: paired_voice_audio
      path: ./training/louise/validation.txt
      fetcher_mode: ['lj']
      phase: val
      max_wav_length: 255995
      max_text_length: 200
      sample_rate: 22050
      load_conditioning: True
      num_conditioning_candidates: 2
      conditioning_length: 44000
      use_bpe_tokenizer: True
      tokenizer_vocab: ./modules/tortoise-tts/tortoise/data/tokenizer.json
      load_aligned_codes: False
      data_type: img
    ]
  ]
  steps:[
    gpt_train:[
      training: gpt
      loss_log_buffer: 500
      optimizer: adamw
      optimizer_params:[
        lr: 1e-05
        weight_decay: 0.01
        beta1: 0.9
        beta2: 0.96
      ]
      clip_grad_eps: 4
      injectors:[
        paired_to_mel:[
          type: torch_mel_spectrogram
          mel_norm_file: ./modules/tortoise-tts/tortoise/data/mel_norms.pth
          in: wav
          out: paired_mel
        ]
        paired_cond_to_mel:[
          type: for_each
          subtype: torch_mel_spectrogram
          mel_norm_file: ./modules/tortoise-tts/tortoise/data/mel_norms.pth
          in: conditioning
          out: paired_conditioning_mel
        ]
        to_codes:[
          type: discrete_token
          in: paired_mel
          out: paired_mel_codes
          dvae_config: ./models/tortoise/train_diffusion_vocoder_22k_level.yml
        ]
        paired_fwd_text:[
          type: generator
          generator: gpt
          in: ['paired_conditioning_mel', 'padded_text', 'text_lengths', 'paired_mel_codes', 'wav_lengths']
          out: ['loss_text_ce', 'loss_mel_ce', 'logits']
        ]
      ]
      losses:[
        text_ce:[
          type: direct
          weight: 0.01
          key: loss_text_ce
        ]
        mel_ce:[
          type: direct
          weight: 1
          key: loss_mel_ce
        ]
      ]
    ]
  ]
  networks:[
    gpt:[
      type: generator
      which_model_G: unified_voice2
      kwargs:[
        layers: 30
        model_dim: 1024
        heads: 16
        max_text_tokens: 402
        max_mel_tokens: 604
        max_conditioning_inputs: 2
        mel_length_compression: 1024
        number_text_tokens: 256
        number_mel_codes: 8194
        start_mel_token: 8192
        stop_mel_token: 8193
        start_text_token: 255
        train_solo_embeddings: False
        use_mel_codes_as_input: True
        checkpointing: True
        tortoise_compat: True
      ]
    ]
  ]
  path:[
    strict_load: True
    resume_state: ./training/louise/finetune/training_state//2560.state
    root: ./
    experiments_root: ./training/louise/finetune
    models: ./training/louise/finetune/models
    training_state: ./training/louise/finetune/training_state
    log: ./training/louise/finetune
    val_images: ./training/louise/finetune/val_images
  ]
  train:[
    niter: 2950
    warmup_iter: -1
    mega_batch_factor: 4
    val_freq: 100
    ema_enabled: False
    default_lr_scheme: MultiStepLR
    gen_lr_steps: [2, 4, 9, 18, 25, 33, 50, 59]
    lr_gamma: 0.5
  ]
  eval:[
    pure: False
    output_state: gen
  ]
  logger:[
    save_checkpoint_freq: 100
    visuals: ['gen', 'mel']
    visual_debug_rate: 1100
    is_mel_spectrogram: True
  ]
  is_train: True
  dist: False
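
Every metrics line that follows reports lr: 3.90625e-08. A minimal sketch of where that value comes from, assuming the standard MultiStepLR rule (the base lr from optimizer_params is multiplied by lr_gamma at each milestone in gen_lr_steps, all of which lie before the resume point of iteration 2560):

    base_lr    = 1e-05                              # optimizer_params.lr
    lr_gamma   = 0.5                                # train.lr_gamma
    milestones = [2, 4, 9, 18, 25, 33, 50, 59]      # train.gen_lr_steps
    current_it = 2561                               # first iteration after resuming

    passed = sum(1 for m in milestones if current_it >= m)   # all 8 milestones passed
    lr = base_lr * lr_gamma ** passed
    print(lr)                                       # 3.90625e-08, as logged below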

23-04-17 14:12:03.719 - INFO: Set model [gpt] to ./training/louise/finetune/models/2560_gpt.pth
23-04-17 14:12:03.719 - INFO: Random seed: 4202
23-04-17 14:12:04.440 - INFO: Number of training data elements: 293, iters: 11
23-04-17 14:12:04.440 - INFO: Total epochs needed: 269 for iters 2,950
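
The iteration/epoch bookkeeping above follows from the training batch size in the config; a minimal sketch, assuming iterations per epoch are computed as ceil(dataset size / batch size):

    import math

    elements   = 293     # training data elements reported above
    batch_size = 28      # datasets.train.batch_size
    niter      = 2950    # train.niter

    iters_per_epoch = math.ceil(elements / batch_size)    # 11
    total_epochs    = math.ceil(niter / iters_per_epoch)  # 269
    print(iters_per_epoch, total_epochs)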
23-04-17 14:12:11.762 - INFO: Loading model for [./training/louise/finetune/models/2560_gpt.pth]
23-04-17 14:12:13.287 - INFO: Resuming training from epoch: 250, iter: 2560.
23-04-17 14:12:13.343 - INFO: Start training from epoch: 250, iter: 2560
23-04-17 14:12:19.682 - INFO: Training Metrics: {"loss_text_ce": 2.9979398250579834, "loss_mel_ce": 1.0098990201950073, "loss_gpt_total": 1.0398783683776855, "lr": 3.90625e-08, "it": 2561, "step": 1, "steps": 10, "epoch": 250, "iteration_rate": 5.954358339309692}
23-04-17 14:12:24.938 - INFO: Training Metrics: {"loss_text_ce": 2.897242784500122, "loss_mel_ce": 0.9955976009368896, "loss_gpt_total": 1.0245699882507324, "lr": 3.90625e-08, "it": 2562, "step": 2, "steps": 10, "epoch": 250, "iteration_rate": 5.2227089405059814}
23-04-17 14:12:30.029 - INFO: Training Metrics: {"loss_text_ce": 2.887754440307617, "loss_mel_ce": 0.997300922870636, "loss_gpt_total": 1.0261784791946411, "lr": 3.90625e-08, "it": 2563, "step": 3, "steps": 10, "epoch": 250, "iteration_rate": 5.058740615844727}
23-04-17 14:12:35.337 - INFO: Training Metrics: {"loss_text_ce": 2.8679802417755127, "loss_mel_ce": 1.013599157333374, "loss_gpt_total": 1.0422790050506592, "lr": 3.90625e-08, "it": 2564, "step": 4, "steps": 10, "epoch": 250, "iteration_rate": 5.274909496307373}
23-04-17 14:12:40.652 - INFO: Training Metrics: {"loss_text_ce": 2.840557098388672, "loss_mel_ce": 1.0206704139709473, "loss_gpt_total": 1.0490760803222656, "lr": 3.90625e-08, "it": 2565, "step": 5, "steps": 10, "epoch": 250, "iteration_rate": 5.280590295791626}
23-04-17 14:12:46.077 - INFO: Training Metrics: {"loss_text_ce": 2.823390007019043, "loss_mel_ce": 1.0308347940444946, "loss_gpt_total": 1.0590687990188599, "lr": 3.90625e-08, "it": 2566, "step": 6, "steps": 10, "epoch": 250, "iteration_rate": 5.391895771026611}
23-04-17 14:12:51.051 - INFO: Training Metrics: {"loss_text_ce": 2.817110061645508, "loss_mel_ce": 1.0369027853012085, "loss_gpt_total": 1.065073847770691, "lr": 3.90625e-08, "it": 2567, "step": 7, "steps": 10, "epoch": 250, "iteration_rate": 4.941345453262329}
23-04-17 14:12:56.264 - INFO: Training Metrics: {"loss_text_ce": 2.8089237213134766, "loss_mel_ce": 1.041125774383545, "loss_gpt_total": 1.0692150592803955, "lr": 3.90625e-08, "it": 2568, "step": 8, "steps": 10, "epoch": 250, "iteration_rate": 5.180842161178589}
23-04-17 14:13:01.414 - INFO: Training Metrics: {"loss_text_ce": 2.80527400970459, "loss_mel_ce": 1.0451977252960205, "loss_gpt_total": 1.073250412940979, "lr": 3.90625e-08, "it": 2569, "step": 9, "steps": 10, "epoch": 250, "iteration_rate": 5.113191843032837}
23-04-17 14:13:06.446 - INFO: Training Metrics: {"loss_text_ce": 2.801102876663208, "loss_mel_ce": 1.0472731590270996, "loss_gpt_total": 1.0752842426300049, "lr": 3.90625e-08, "it": 2570, "step": 10, "steps": 10, "epoch": 250, "iteration_rate": 4.99919581413269}
23-04-17 14:13:12.069 - INFO: Training Metrics: {"loss_text_ce": 2.7605340480804443, "loss_mel_ce": 1.0799636840820312, "loss_gpt_total": 1.1075689792633057, "lr": 3.90625e-08, "it": 2571, "step": 1, "steps": 10, "epoch": 251, "iteration_rate": 5.377644062042236}
23-04-17 14:13:17.272 - INFO: Training Metrics: {"loss_text_ce": 2.7499582767486572, "loss_mel_ce": 1.0779552459716797, "loss_gpt_total": 1.105454683303833, "lr": 3.90625e-08, "it": 2572, "step": 2, "steps": 10, "epoch": 251, "iteration_rate": 5.170330286026001}
23-04-17 14:13:22.293 - INFO: Training Metrics: {"loss_text_ce": 2.744910478591919, "loss_mel_ce": 1.0738807916641235, "loss_gpt_total": 1.1013298034667969, "lr": 3.90625e-08, "it": 2573, "step": 3, "steps": 10, "epoch": 251, "iteration_rate": 4.988760709762573}
23-04-17 14:13:27.584 - INFO: Training Metrics: {"loss_text_ce": 2.7366104125976562, "loss_mel_ce": 1.0709095001220703, "loss_gpt_total": 1.0982756614685059, "lr": 3.90625e-08, "it": 2574, "step": 4, "steps": 10, "epoch": 251, "iteration_rate": 5.258440256118774}
23-04-17 14:13:32.856 - INFO: Training Metrics: {"loss_text_ce": 2.7334213256835938, "loss_mel_ce": 1.0694692134857178, "loss_gpt_total": 1.0968034267425537, "lr": 3.90625e-08, "it": 2575, "step": 5, "steps": 10, "epoch": 251, "iteration_rate": 5.238672733306885}
23-04-17 14:13:38.233 - INFO: Training Metrics: {"loss_text_ce": 2.731761932373047, "loss_mel_ce": 1.070326328277588, "loss_gpt_total": 1.0976439714431763, "lr": 3.90625e-08, "it": 2576, "step": 6, "steps": 10, "epoch": 251, "iteration_rate": 5.344514608383179}
23-04-17 14:13:43.380 - INFO: Training Metrics: {"loss_text_ce": 2.729846954345703, "loss_mel_ce": 1.07101309299469, "loss_gpt_total": 1.0983115434646606, "lr": 3.90625e-08, "it": 2577, "step": 7, "steps": 10, "epoch": 251, "iteration_rate": 5.114569187164307}
23-04-17 14:13:48.196 - INFO: Training Metrics: {"loss_text_ce": 2.731401205062866, "loss_mel_ce": 1.069991111755371, "loss_gpt_total": 1.0973050594329834, "lr": 3.90625e-08, "it": 2578, "step": 8, "steps": 10, "epoch": 251, "iteration_rate": 4.7819600105285645}
23-04-17 14:13:53.295 - INFO: Training Metrics: {"loss_text_ce": 2.7329835891723633, "loss_mel_ce": 1.0689260959625244, "loss_gpt_total": 1.0962557792663574, "lr": 3.90625e-08, "it": 2579, "step": 9, "steps": 10, "epoch": 251, "iteration_rate": 5.066692113876343}
23-04-17 14:13:58.357 - INFO: Training Metrics: {"loss_text_ce": 2.736394166946411, "loss_mel_ce": 1.0684922933578491, "loss_gpt_total": 1.0958563089370728, "lr": 3.90625e-08, "it": 2580, "step": 10, "steps": 10, "epoch": 251, "iteration_rate": 5.028318881988525}
23-04-17 14:14:03.622 - INFO: Training Metrics: {"loss_text_ce": 2.7659308910369873, "loss_mel_ce": 1.0596340894699097, "loss_gpt_total": 1.0872933864593506, "lr": 3.90625e-08, "it": 2581, "step": 1, "steps": 10, "epoch": 252, "iteration_rate": 5.0580034255981445}
23-04-17 14:14:08.773 - INFO: Training Metrics: {"loss_text_ce": 2.768939971923828, "loss_mel_ce": 1.0602259635925293, "loss_gpt_total": 1.087915301322937, "lr": 3.90625e-08, "it": 2582, "step": 2, "steps": 10, "epoch": 252, "iteration_rate": 5.11632776260376}
23-04-17 14:14:14.116 - INFO: Training Metrics: {"loss_text_ce": 2.7684085369110107, "loss_mel_ce": 1.0611714124679565, "loss_gpt_total": 1.0888553857803345, "lr": 3.90625e-08, "it": 2583, "step": 3, "steps": 10, "epoch": 252, "iteration_rate": 5.307675123214722}
23-04-17 14:14:19.592 - INFO: Training Metrics: {"loss_text_ce": 2.7656660079956055, "loss_mel_ce": 1.0610308647155762, "loss_gpt_total": 1.0886874198913574, "lr": 3.90625e-08, "it": 2584, "step": 4, "steps": 10, "epoch": 252, "iteration_rate": 5.442934274673462}
23-04-17 14:14:24.714 - INFO: Training Metrics: {"loss_text_ce": 2.766216278076172, "loss_mel_ce": 1.0616087913513184, "loss_gpt_total": 1.089270830154419, "lr": 3.90625e-08, "it": 2585, "step": 5, "steps": 10, "epoch": 252, "iteration_rate": 5.090041637420654}
23-04-17 14:14:30.011 - INFO: Training Metrics: {"loss_text_ce": 2.7648115158081055, "loss_mel_ce": 1.0620447397232056, "loss_gpt_total": 1.0896925926208496, "lr": 3.90625e-08, "it": 2586, "step": 6, "steps": 10, "epoch": 252, "iteration_rate": 5.263292551040649}
23-04-17 14:14:35.015 - INFO: Training Metrics: {"loss_text_ce": 2.7646167278289795, "loss_mel_ce": 1.0621368885040283, "loss_gpt_total": 1.089782953262329, "lr": 3.90625e-08, "it": 2587, "step": 7, "steps": 10, "epoch": 252, "iteration_rate": 4.971486568450928}
23-04-17 14:14:40.165 - INFO: Training Metrics: {"loss_text_ce": 2.7634594440460205, "loss_mel_ce": 1.062184453010559, "loss_gpt_total": 1.089819073677063, "lr": 3.90625e-08, "it": 2588, "step": 8, "steps": 10, "epoch": 252, "iteration_rate": 5.118138551712036}
23-04-17 14:14:45.352 - INFO: Training Metrics: {"loss_text_ce": 2.761849880218506, "loss_mel_ce": 1.06251060962677, "loss_gpt_total": 1.0901291370391846, "lr": 3.90625e-08, "it": 2589, "step": 9, "steps": 10, "epoch": 252, "iteration_rate": 5.153963804244995}
23-04-17 14:14:50.437 - INFO: Training Metrics: {"loss_text_ce": 2.760878324508667, "loss_mel_ce": 1.0626062154769897, "loss_gpt_total": 1.0902149677276611, "lr": 3.90625e-08, "it": 2590, "step": 10, "steps": 10, "epoch": 252, "iteration_rate": 5.053110361099243}
23-04-17 14:14:55.874 - INFO: Training Metrics: {"loss_text_ce": 2.7493655681610107, "loss_mel_ce": 1.06691575050354, "loss_gpt_total": 1.0944092273712158, "lr": 3.90625e-08, "it": 2591, "step": 1, "steps": 10, "epoch": 253, "iteration_rate": 5.221655607223511}
23-04-17 14:15:01.318 - INFO: Training Metrics: {"loss_text_ce": 2.742051124572754, "loss_mel_ce": 1.0675408840179443, "loss_gpt_total": 1.094961166381836, "lr": 3.90625e-08, "it": 2592, "step": 2, "steps": 10, "epoch": 253, "iteration_rate": 5.411060094833374}
23-04-17 14:15:06.366 - INFO: Training Metrics: {"loss_text_ce": 2.739686965942383, "loss_mel_ce": 1.066691517829895, "loss_gpt_total": 1.0940881967544556, "lr": 3.90625e-08, "it": 2593, "step": 3, "steps": 10, "epoch": 253, "iteration_rate": 5.016056776046753}
23-04-17 14:15:11.357 - INFO: Training Metrics: {"loss_text_ce": 2.7409074306488037, "loss_mel_ce": 1.0659711360931396, "loss_gpt_total": 1.093380093574524, "lr": 3.90625e-08, "it": 2594, "step": 4, "steps": 10, "epoch": 253, "iteration_rate": 4.957892894744873}
23-04-17 14:15:16.480 - INFO: Training Metrics: {"loss_text_ce": 2.745137929916382, "loss_mel_ce": 1.067038893699646, "loss_gpt_total": 1.0944902896881104, "lr": 3.90625e-08, "it": 2595, "step": 5, "steps": 10, "epoch": 253, "iteration_rate": 5.090130567550659}
23-04-17 14:15:21.472 - INFO: Training Metrics: {"loss_text_ce": 2.7493255138397217, "loss_mel_ce": 1.0677977800369263, "loss_gpt_total": 1.0952908992767334, "lr": 3.90625e-08, "it": 2596, "step": 6, "steps": 10, "epoch": 253, "iteration_rate": 4.9602439403533936}
23-04-17 14:15:26.317 - INFO: Training Metrics: {"loss_text_ce": 2.753138303756714, "loss_mel_ce": 1.0672696828842163, "loss_gpt_total": 1.0948010683059692, "lr": 3.90625e-08, "it": 2597, "step": 7, "steps": 10, "epoch": 253, "iteration_rate": 4.81099534034729}
23-04-17 14:15:31.666 - INFO: Training Metrics: {"loss_text_ce": 2.7562127113342285, "loss_mel_ce": 1.0675922632217407, "loss_gpt_total": 1.0951542854309082, "lr": 3.90625e-08, "it": 2598, "step": 8, "steps": 10, "epoch": 253, "iteration_rate": 5.316158771514893}
23-04-17 14:15:36.944 - INFO: Training Metrics: {"loss_text_ce": 2.7580363750457764, "loss_mel_ce": 1.0677703619003296, "loss_gpt_total": 1.0953505039215088, "lr": 3.90625e-08, "it": 2599, "step": 9, "steps": 10, "epoch": 253, "iteration_rate": 5.245838403701782}
23-04-17 14:15:46.781 - INFO: Saving models and training states.
23-04-17 14:15:46.786 - INFO: Training Metrics: {"loss_text_ce": 2.758751392364502, "loss_mel_ce": 1.0678088665008545, "loss_gpt_total": 1.0953962802886963, "lr": 3.90625e-08, "it": 2600, "step": 10, "steps": 10, "epoch": 253, "iteration_rate": 5.0682032108306885}
23-04-17 14:15:52.952 - INFO: Training Metrics: {"loss_text_ce": 2.760568857192993, "loss_mel_ce": 1.068356990814209, "loss_gpt_total": 1.0959625244140625, "lr": 3.90625e-08, "it": 2601, "step": 1, "steps": 10, "epoch": 254, "iteration_rate": 5.312323331832886}
23-04-17 14:15:58.082 - INFO: Training Metrics: {"loss_text_ce": 2.7644691467285156, "loss_mel_ce": 1.0705174207687378, "loss_gpt_total": 1.0981619358062744, "lr": 3.90625e-08, "it": 2602, "step": 2, "steps": 10, "epoch": 254, "iteration_rate": 5.097777366638184}
23-04-17 14:16:02.983 - INFO: Training Metrics: {"loss_text_ce": 2.766695976257324, "loss_mel_ce": 1.0696706771850586, "loss_gpt_total": 1.0973374843597412, "lr": 3.90625e-08, "it": 2603, "step": 3, "steps": 10, "epoch": 254, "iteration_rate": 4.867630958557129}
23-04-17 14:16:08.172 - INFO: Training Metrics: {"loss_text_ce": 2.768364906311035, "loss_mel_ce": 1.0687439441680908, "loss_gpt_total": 1.0964275598526, "lr": 3.90625e-08, "it": 2604, "step": 4, "steps": 10, "epoch": 254, "iteration_rate": 5.157175779342651}
23-04-17 14:16:13.524 - INFO: Training Metrics: {"loss_text_ce": 2.769170045852661, "loss_mel_ce": 1.0692778825759888, "loss_gpt_total": 1.0969696044921875, "lr": 3.90625e-08, "it": 2605, "step": 5, "steps": 10, "epoch": 254, "iteration_rate": 5.318818807601929}
23-04-17 14:16:18.518 - INFO: Training Metrics: {"loss_text_ce": 2.768305540084839, "loss_mel_ce": 1.0687729120254517, "loss_gpt_total": 1.0964560508728027, "lr": 3.90625e-08, "it": 2606, "step": 6, "steps": 10, "epoch": 254, "iteration_rate": 4.961809158325195}
23-04-17 14:16:23.848 - INFO: Training Metrics: {"loss_text_ce": 2.7671923637390137, "loss_mel_ce": 1.0687530040740967, "loss_gpt_total": 1.09642493724823, "lr": 3.90625e-08, "it": 2607, "step": 7, "steps": 10, "epoch": 254, "iteration_rate": 5.297144412994385}
23-04-17 14:16:28.840 - INFO: Training Metrics: {"loss_text_ce": 2.7668309211730957, "loss_mel_ce": 1.068759560585022, "loss_gpt_total": 1.0964279174804688, "lr": 3.90625e-08, "it": 2608, "step": 8, "steps": 10, "epoch": 254, "iteration_rate": 4.9596569538116455}
23-04-17 14:16:34.214 - INFO: Training Metrics: {"loss_text_ce": 2.76586651802063, "loss_mel_ce": 1.0689754486083984, "loss_gpt_total": 1.0966341495513916, "lr": 3.90625e-08, "it": 2609, "step": 9, "steps": 10, "epoch": 254, "iteration_rate": 5.341468811035156}
23-04-17 14:16:39.261 - INFO: Training Metrics: {"loss_text_ce": 2.7656455039978027, "loss_mel_ce": 1.0690040588378906, "loss_gpt_total": 1.0966604948043823, "lr": 3.90625e-08, "it": 2610, "step": 10, "steps": 10, "epoch": 254, "iteration_rate": 5.006791591644287}
23-04-17 14:16:44.917 - INFO: Training Metrics: {"loss_text_ce": 2.759087085723877, "loss_mel_ce": 1.074084758758545, "loss_gpt_total": 1.1016756296157837, "lr": 3.90625e-08, "it": 2611, "step": 1, "steps": 10, "epoch": 255, "iteration_rate": 5.432046890258789}
23-04-17 14:16:50.138 - INFO: Training Metrics: {"loss_text_ce": 2.759664535522461, "loss_mel_ce": 1.073415994644165, "loss_gpt_total": 1.1010127067565918, "lr": 3.90625e-08, "it": 2612, "step": 2, "steps": 10, "epoch": 255, "iteration_rate": 5.187448501586914}
23-04-17 14:16:55.430 - INFO: Training Metrics: {"loss_text_ce": 2.7577247619628906, "loss_mel_ce": 1.072514533996582, "loss_gpt_total": 1.100091814994812, "lr": 3.90625e-08, "it": 2613, "step": 3, "steps": 10, "epoch": 255, "iteration_rate": 5.257400751113892}
23-04-17 14:17:00.572 - INFO: Training Metrics: {"loss_text_ce": 2.756298303604126, "loss_mel_ce": 1.0716874599456787, "loss_gpt_total": 1.0992504358291626, "lr": 3.90625e-08, "it": 2614, "step": 4, "steps": 10, "epoch": 255, "iteration_rate": 5.1096508502960205}
23-04-17 14:17:05.661 - INFO: Training Metrics: {"loss_text_ce": 2.7551767826080322, "loss_mel_ce": 1.0709989070892334, "loss_gpt_total": 1.0985506772994995, "lr": 3.90625e-08, "it": 2615, "step": 5, "steps": 10, "epoch": 255, "iteration_rate": 5.05566143989563}
23-04-17 14:17:10.840 - INFO: Training Metrics: {"loss_text_ce": 2.755391836166382, "loss_mel_ce": 1.070787787437439, "loss_gpt_total": 1.0983415842056274, "lr": 3.90625e-08, "it": 2616, "step": 6, "steps": 10, "epoch": 255, "iteration_rate": 5.145535230636597}
23-04-17 14:17:16.115 - INFO: Training Metrics: {"loss_text_ce": 2.755474328994751, "loss_mel_ce": 1.0708014965057373, "loss_gpt_total": 1.0983561277389526, "lr": 3.90625e-08, "it": 2617, "step": 7, "steps": 10, "epoch": 255, "iteration_rate": 5.240565299987793}
23-04-17 14:17:21.299 - INFO: Training Metrics: {"loss_text_ce": 2.7555131912231445, "loss_mel_ce": 1.0706182718276978, "loss_gpt_total": 1.0981733798980713, "lr": 3.90625e-08, "it": 2618, "step": 8, "steps": 10, "epoch": 255, "iteration_rate": 5.1502861976623535}
23-04-17 14:17:26.483 - INFO: Training Metrics: {"loss_text_ce": 2.756139039993286, "loss_mel_ce": 1.0705252885818481, "loss_gpt_total": 1.0980865955352783, "lr": 3.90625e-08, "it": 2619, "step": 9, "steps": 10, "epoch": 255, "iteration_rate": 5.1515257358551025}
23-04-17 14:17:31.961 - INFO: Training Metrics: {"loss_text_ce": 2.755966901779175, "loss_mel_ce": 1.0704985857009888, "loss_gpt_total": 1.0980581045150757, "lr": 3.90625e-08, "it": 2620, "step": 10, "steps": 10, "epoch": 255, "iteration_rate": 5.443567991256714}
23-04-17 14:17:37.553 - INFO: Training Metrics: {"loss_text_ce": 2.7496302127838135, "loss_mel_ce": 1.0710947513580322, "loss_gpt_total": 1.0985912084579468, "lr": 3.90625e-08, "it": 2621, "step": 1, "steps": 10, "epoch": 256, "iteration_rate": 5.3893656730651855}
23-04-17 14:17:42.630 - INFO: Training Metrics: {"loss_text_ce": 2.7509186267852783, "loss_mel_ce": 1.0703953504562378, "loss_gpt_total": 1.0979046821594238, "lr": 3.90625e-08, "it": 2622, "step": 2, "steps": 10, "epoch": 256, "iteration_rate": 5.040323495864868}
23-04-17 14:17:47.787 - INFO: Training Metrics: {"loss_text_ce": 2.7533295154571533, "loss_mel_ce": 1.0703951120376587, "loss_gpt_total": 1.097928524017334, "lr": 3.90625e-08, "it": 2623, "step": 3, "steps": 10, "epoch": 256, "iteration_rate": 5.120485067367554}
23-04-17 14:17:53.511 - INFO: Training Metrics: {"loss_text_ce": 2.7534101009368896, "loss_mel_ce": 1.0703102350234985, "loss_gpt_total": 1.0978444814682007, "lr": 3.90625e-08, "it": 2624, "step": 4, "steps": 10, "epoch": 256, "iteration_rate": 5.691117286682129}
23-04-17 14:17:58.501 - INFO: Training Metrics: {"loss_text_ce": 2.755002021789551, "loss_mel_ce": 1.0702497959136963, "loss_gpt_total": 1.0978000164031982, "lr": 3.90625e-08, "it": 2625, "step": 5, "steps": 10, "epoch": 256, "iteration_rate": 4.955210208892822}
23-04-17 14:18:03.879 - INFO: Training Metrics: {"loss_text_ce": 2.7553160190582275, "loss_mel_ce": 1.070377230644226, "loss_gpt_total": 1.0979305505752563, "lr": 3.90625e-08, "it": 2626, "step": 6, "steps": 10, "epoch": 256, "iteration_rate": 5.344856023788452}
23-04-17 14:18:09.185 - INFO: Training Metrics: {"loss_text_ce": 2.754870891571045, "loss_mel_ce": 1.0703376531600952, "loss_gpt_total": 1.097886323928833, "lr": 3.90625e-08, "it": 2627, "step": 7, "steps": 10, "epoch": 256, "iteration_rate": 5.272791862487793}
23-04-17 14:18:14.456 - INFO: Training Metrics: {"loss_text_ce": 2.754647970199585, "loss_mel_ce": 1.0705201625823975, "loss_gpt_total": 1.0980666875839233, "lr": 3.90625e-08, "it": 2628, "step": 8, "steps": 10, "epoch": 256, "iteration_rate": 5.237374544143677}
23-04-17 14:18:19.512 - INFO: Training Metrics: {"loss_text_ce": 2.7549991607666016, "loss_mel_ce": 1.0705739259719849, "loss_gpt_total": 1.0981239080429077, "lr": 3.90625e-08, "it": 2629, "step": 9, "steps": 10, "epoch": 256, "iteration_rate": 5.023519039154053}
23-04-17 14:18:24.772 - INFO: Training Metrics: {"loss_text_ce": 2.755019187927246, "loss_mel_ce": 1.0705678462982178, "loss_gpt_total": 1.0981180667877197, "lr": 3.90625e-08, "it": 2630, "step": 10, "steps": 10, "epoch": 256, "iteration_rate": 5.226175546646118}
23-04-17 14:18:30.213 - INFO: Training Metrics: {"loss_text_ce": 2.7564642429351807, "loss_mel_ce": 1.0700132846832275, "loss_gpt_total": 1.0975778102874756, "lr": 3.90625e-08, "it": 2631, "step": 1, "steps": 10, "epoch": 257, "iteration_rate": 5.237426280975342}
23-04-17 14:18:35.239 - INFO: Training Metrics: {"loss_text_ce": 2.7562520503997803, "loss_mel_ce": 1.0693734884262085, "loss_gpt_total": 1.0969359874725342, "lr": 3.90625e-08, "it": 2632, "step": 2, "steps": 10, "epoch": 257, "iteration_rate": 4.992801666259766}
23-04-17 14:18:40.412 - INFO: Training Metrics: {"loss_text_ce": 2.7585020065307617, "loss_mel_ce": 1.0692317485809326, "loss_gpt_total": 1.0968167781829834, "lr": 3.90625e-08, "it": 2633, "step": 3, "steps": 10, "epoch": 257, "iteration_rate": 5.139241456985474}
23-04-17 14:18:45.815 - INFO: Training Metrics: {"loss_text_ce": 2.759730577468872, "loss_mel_ce": 1.0691914558410645, "loss_gpt_total": 1.0967886447906494, "lr": 3.90625e-08, "it": 2634, "step": 4, "steps": 10, "epoch": 257, "iteration_rate": 5.370253324508667}
23-04-17 14:18:51.197 - INFO: Training Metrics: {"loss_text_ce": 2.760343313217163, "loss_mel_ce": 1.069024682044983, "loss_gpt_total": 1.0966280698776245, "lr": 3.90625e-08, "it": 2635, "step": 5, "steps": 10, "epoch": 257, "iteration_rate": 5.348318815231323}
23-04-17 14:18:56.563 - INFO: Training Metrics: {"loss_text_ce": 2.7607505321502686, "loss_mel_ce": 1.0690706968307495, "loss_gpt_total": 1.0966782569885254, "lr": 3.90625e-08, "it": 2636, "step": 6, "steps": 10, "epoch": 257, "iteration_rate": 5.332569599151611}
23-04-17 14:19:02.186 - INFO: Training Metrics: {"loss_text_ce": 2.7600579261779785, "loss_mel_ce": 1.068805456161499, "loss_gpt_total": 1.096406102180481, "lr": 3.90625e-08, "it": 2637, "step": 7, "steps": 10, "epoch": 257, "iteration_rate": 5.585629940032959}
23-04-17 14:19:07.644 - INFO: Training Metrics: {"loss_text_ce": 2.75954532623291, "loss_mel_ce": 1.0686885118484497, "loss_gpt_total": 1.096284031867981, "lr": 3.90625e-08, "it": 2638, "step": 8, "steps": 10, "epoch": 257, "iteration_rate": 5.419545650482178}
23-04-17 14:19:12.923 - INFO: Training Metrics: {"loss_text_ce": 2.7591090202331543, "loss_mel_ce": 1.0684702396392822, "loss_gpt_total": 1.0960612297058105, "lr": 3.90625e-08, "it": 2639, "step": 9, "steps": 10, "epoch": 257, "iteration_rate": 5.246339321136475}
23-04-17 14:19:18.312 - INFO: Training Metrics: {"loss_text_ce": 2.758916139602661, "loss_mel_ce": 1.0686252117156982, "loss_gpt_total": 1.0962142944335938, "lr": 3.90625e-08, "it": 2640, "step": 10, "steps": 10, "epoch": 257, "iteration_rate": 5.3562171459198}
23-04-17 14:19:23.887 - INFO: Training Metrics: {"loss_text_ce": 2.7557244300842285, "loss_mel_ce": 1.0729148387908936, "loss_gpt_total": 1.100472092628479, "lr": 3.90625e-08, "it": 2641, "step": 1, "steps": 10, "epoch": 258, "iteration_rate": 5.35733699798584}
23-04-17 14:19:29.149 - INFO: Training Metrics: {"loss_text_ce": 2.7570338249206543, "loss_mel_ce": 1.0724740028381348, "loss_gpt_total": 1.1000443696975708, "lr": 3.90625e-08, "it": 2642, "step": 2, "steps": 10, "epoch": 258, "iteration_rate": 5.2252771854400635}
23-04-17 14:19:34.705 - INFO: Training Metrics: {"loss_text_ce": 2.756775140762329, "loss_mel_ce": 1.0730115175247192, "loss_gpt_total": 1.1005792617797852, "lr": 3.90625e-08, "it": 2643, "step": 3, "steps": 10, "epoch": 258, "iteration_rate": 5.5228893756866455}
23-04-17 14:19:39.829 - INFO: Training Metrics: {"loss_text_ce": 2.757626533508301, "loss_mel_ce": 1.0737066268920898, "loss_gpt_total": 1.1012829542160034, "lr": 3.90625e-08, "it": 2644, "step": 4, "steps": 10, "epoch": 258, "iteration_rate": 5.091712474822998}
23-04-17 14:19:45.082 - INFO: Training Metrics: {"loss_text_ce": 2.7576231956481934, "loss_mel_ce": 1.0740885734558105, "loss_gpt_total": 1.1016647815704346, "lr": 3.90625e-08, "it": 2645, "step": 5, "steps": 10, "epoch": 258, "iteration_rate": 5.2179787158966064}
23-04-17 14:19:50.344 - INFO: Training Metrics: {"loss_text_ce": 2.7572662830352783, "loss_mel_ce": 1.0744179487228394, "loss_gpt_total": 1.1019905805587769, "lr": 3.90625e-08, "it": 2646, "step": 6, "steps": 10, "epoch": 258, "iteration_rate": 5.22935676574707}
23-04-17 14:19:55.519 - INFO: Training Metrics: {"loss_text_ce": 2.7571358680725098, "loss_mel_ce": 1.0746039152145386, "loss_gpt_total": 1.1021753549575806, "lr": 3.90625e-08, "it": 2647, "step": 7, "steps": 10, "epoch": 258, "iteration_rate": 5.140671014785767}
23-04-17 14:20:00.675 - INFO: Training Metrics: {"loss_text_ce": 2.75707745552063, "loss_mel_ce": 1.074578046798706, "loss_gpt_total": 1.1021488904953003, "lr": 3.90625e-08, "it": 2648, "step": 8, "steps": 10, "epoch": 258, "iteration_rate": 5.1232123374938965}
23-04-17 14:20:06.204 - INFO: Training Metrics: {"loss_text_ce": 2.756305694580078, "loss_mel_ce": 1.0744202136993408, "loss_gpt_total": 1.1019831895828247, "lr": 3.90625e-08, "it": 2649, "step": 9, "steps": 10, "epoch": 258, "iteration_rate": 5.495786190032959}
23-04-17 14:20:11.501 - INFO: Training Metrics: {"loss_text_ce": 2.7553837299346924, "loss_mel_ce": 1.074150800704956, "loss_gpt_total": 1.1017045974731445, "lr": 3.90625e-08, "it": 2650, "step": 10, "steps": 10, "epoch": 258, "iteration_rate": 5.264479637145996}
23-04-17 14:20:16.871 - INFO: Training Metrics: {"loss_text_ce": 2.7446064949035645, "loss_mel_ce": 1.0703792572021484, "loss_gpt_total": 1.0978254079818726, "lr": 3.90625e-08, "it": 2651, "step": 1, "steps": 10, "epoch": 259, "iteration_rate": 5.194794654846191}
23-04-17 14:20:22.185 - INFO: Training Metrics: {"loss_text_ce": 2.7457005977630615, "loss_mel_ce": 1.0712730884552002, "loss_gpt_total": 1.0987303256988525, "lr": 3.90625e-08, "it": 2652, "step": 2, "steps": 10, "epoch": 259, "iteration_rate": 5.281114339828491}
23-04-17 14:20:27.338 - INFO: Training Metrics: {"loss_text_ce": 2.746925115585327, "loss_mel_ce": 1.0722013711929321, "loss_gpt_total": 1.0996708869934082, "lr": 3.90625e-08, "it": 2653, "step": 3, "steps": 10, "epoch": 259, "iteration_rate": 5.119874000549316}
23-04-17 14:20:32.757 - INFO: Training Metrics: {"loss_text_ce": 2.746995687484741, "loss_mel_ce": 1.0728859901428223, "loss_gpt_total": 1.100356101989746, "lr": 3.90625e-08, "it": 2654, "step": 4, "steps": 10, "epoch": 259, "iteration_rate": 5.386828899383545}
23-04-17 14:20:38.012 - INFO: Training Metrics: {"loss_text_ce": 2.7465057373046875, "loss_mel_ce": 1.0732171535491943, "loss_gpt_total": 1.100682258605957, "lr": 3.90625e-08, "it": 2655, "step": 5, "steps": 10, "epoch": 259, "iteration_rate": 5.222622632980347}
23-04-17 14:20:43.470 - INFO: Training Metrics: {"loss_text_ce": 2.746004104614258, "loss_mel_ce": 1.0737441778182983, "loss_gpt_total": 1.1012043952941895, "lr": 3.90625e-08, "it": 2656, "step": 6, "steps": 10, "epoch": 259, "iteration_rate": 5.422454595565796}
23-04-17 14:20:48.605 - INFO: Training Metrics: {"loss_text_ce": 2.745168685913086, "loss_mel_ce": 1.0737804174423218, "loss_gpt_total": 1.1012322902679443, "lr": 3.90625e-08, "it": 2657, "step": 7, "steps": 10, "epoch": 259, "iteration_rate": 5.0972747802734375}
23-04-17 14:20:53.491 - INFO: Training Metrics: {"loss_text_ce": 2.745177745819092, "loss_mel_ce": 1.0736677646636963, "loss_gpt_total": 1.1011197566986084, "lr": 3.90625e-08, "it": 2658, "step": 8, "steps": 10, "epoch": 259, "iteration_rate": 4.853940725326538}
23-04-17 14:20:58.640 - INFO: Training Metrics: {"loss_text_ce": 2.7451090812683105, "loss_mel_ce": 1.0734715461730957, "loss_gpt_total": 1.1009228229522705, "lr": 3.90625e-08, "it": 2659, "step": 9, "steps": 10, "epoch": 259, "iteration_rate": 5.116001605987549}
23-04-17 14:21:03.733 - INFO: Training Metrics: {"loss_text_ce": 2.745321273803711, "loss_mel_ce": 1.0733870267868042, "loss_gpt_total": 1.1008403301239014, "lr": 3.90625e-08, "it": 2660, "step": 10, "steps": 10, "epoch": 259, "iteration_rate": 5.060761213302612}
23-04-17 14:21:08.972 - INFO: Training Metrics: {"loss_text_ce": 2.749234437942505, "loss_mel_ce": 1.0723861455917358, "loss_gpt_total": 1.0998785495758057, "lr": 3.90625e-08, "it": 2661, "step": 1, "steps": 10, "epoch": 260, "iteration_rate": 5.031685829162598}
23-04-17 14:21:13.955 - INFO: Training Metrics: {"loss_text_ce": 2.7503206729888916, "loss_mel_ce": 1.0718656778335571, "loss_gpt_total": 1.0993690490722656, "lr": 3.90625e-08, "it": 2662, "step": 2, "steps": 10, "epoch": 260, "iteration_rate": 4.950758695602417}
23-04-17 14:21:19.046 - INFO: Training Metrics: {"loss_text_ce": 2.7512662410736084, "loss_mel_ce": 1.0719780921936035, "loss_gpt_total": 1.099490761756897, "lr": 3.90625e-08, "it": 2663, "step": 3, "steps": 10, "epoch": 260, "iteration_rate": 5.057015657424927}
23-04-17 14:21:24.404 - INFO: Training Metrics: {"loss_text_ce": 2.752408742904663, "loss_mel_ce": 1.0723649263381958, "loss_gpt_total": 1.0998890399932861, "lr": 3.90625e-08, "it": 2664, "step": 4, "steps": 10, "epoch": 260, "iteration_rate": 5.323716402053833}
23-04-17 14:21:30.033 - INFO: Training Metrics: {"loss_text_ce": 2.753077268600464, "loss_mel_ce": 1.073014259338379, "loss_gpt_total": 1.1005451679229736, "lr": 3.90625e-08, "it": 2665, "step": 5, "steps": 10, "epoch": 260, "iteration_rate": 5.596291542053223}
23-04-17 14:21:35.127 - INFO: Training Metrics: {"loss_text_ce": 2.753772497177124, "loss_mel_ce": 1.0732287168502808, "loss_gpt_total": 1.1007665395736694, "lr": 3.90625e-08, "it": 2666, "step": 6, "steps": 10, "epoch": 260, "iteration_rate": 5.061224699020386}
23-04-17 14:21:40.215 - INFO: Training Metrics: {"loss_text_ce": 2.7542223930358887, "loss_mel_ce": 1.0732543468475342, "loss_gpt_total": 1.1007965803146362, "lr": 3.90625e-08, "it": 2667, "step": 7, "steps": 10, "epoch": 260, "iteration_rate": 5.055445194244385}
23-04-17 14:21:45.252 - INFO: Training Metrics: {"loss_text_ce": 2.7543487548828125, "loss_mel_ce": 1.0730695724487305, "loss_gpt_total": 1.1006132364273071, "lr": 3.90625e-08, "it": 2668, "step": 8, "steps": 10, "epoch": 260, "iteration_rate": 5.003914833068848}
23-04-17 14:21:50.899 - INFO: Training Metrics: {"loss_text_ce": 2.753657341003418, "loss_mel_ce": 1.0729694366455078, "loss_gpt_total": 1.1005061864852905, "lr": 3.90625e-08, "it": 2669, "step": 9, "steps": 10, "epoch": 260, "iteration_rate": 5.614763498306274}
23-04-17 14:21:55.901 - INFO: Training Metrics: {"loss_text_ce": 2.7534077167510986, "loss_mel_ce": 1.072810411453247, "loss_gpt_total": 1.1003445386886597, "lr": 3.90625e-08, "it": 2670, "step": 10, "steps": 10, "epoch": 260, "iteration_rate": 4.969434022903442}
23-04-17 14:22:01.349 - INFO: Training Metrics: {"loss_text_ce": 2.751038074493408, "loss_mel_ce": 1.07041335105896, "loss_gpt_total": 1.097923755645752, "lr": 3.90625e-08, "it": 2671, "step": 1, "steps": 10, "epoch": 261, "iteration_rate": 5.241037845611572}
23-04-17 14:22:06.678 - INFO: Training Metrics: {"loss_text_ce": 2.748927116394043, "loss_mel_ce": 1.070082187652588, "loss_gpt_total": 1.0975714921951294, "lr": 3.90625e-08, "it": 2672, "step": 2, "steps": 10, "epoch": 261, "iteration_rate": 5.296677350997925}
23-04-17 14:22:11.844 - INFO: Training Metrics: {"loss_text_ce": 2.7482032775878906, "loss_mel_ce": 1.0703166723251343, "loss_gpt_total": 1.0977988243103027, "lr": 3.90625e-08, "it": 2673, "step": 3, "steps": 10, "epoch": 261, "iteration_rate": 5.132628917694092}
23-04-17 14:22:16.998 - INFO: Training Metrics: {"loss_text_ce": 2.748155355453491, "loss_mel_ce": 1.070494532585144, "loss_gpt_total": 1.0979760885238647, "lr": 3.90625e-08, "it": 2674, "step": 4, "steps": 10, "epoch": 261, "iteration_rate": 5.120819568634033}
23-04-17 14:22:22.317 - INFO: Training Metrics: {"loss_text_ce": 2.7474794387817383, "loss_mel_ce": 1.0708740949630737, "loss_gpt_total": 1.09834885597229, "lr": 3.90625e-08, "it": 2675, "step": 5, "steps": 10, "epoch": 261, "iteration_rate": 5.2858216762542725}
23-04-17 14:22:27.874 - INFO: Training Metrics: {"loss_text_ce": 2.746453285217285, "loss_mel_ce": 1.0711932182312012, "loss_gpt_total": 1.0986577272415161, "lr": 3.90625e-08, "it": 2676, "step": 6, "steps": 10, "epoch": 261, "iteration_rate": 5.524085760116577}
23-04-17 14:22:33.014 - INFO: Training Metrics: {"loss_text_ce": 2.7462058067321777, "loss_mel_ce": 1.0714534521102905, "loss_gpt_total": 1.098915696144104, "lr": 3.90625e-08, "it": 2677, "step": 7, "steps": 10, "epoch": 261, "iteration_rate": 5.107058048248291}
23-04-17 14:22:38.360 - INFO: Training Metrics: {"loss_text_ce": 2.746140480041504, "loss_mel_ce": 1.0717366933822632, "loss_gpt_total": 1.0991982221603394, "lr": 3.90625e-08, "it": 2678, "step": 8, "steps": 10, "epoch": 261, "iteration_rate": 5.312972784042358}
23-04-17 14:22:43.424 - INFO: Training Metrics: {"loss_text_ce": 2.746290445327759, "loss_mel_ce": 1.0719457864761353, "loss_gpt_total": 1.099408745765686, "lr": 3.90625e-08, "it": 2679, "step": 9, "steps": 10, "epoch": 261, "iteration_rate": 5.031108379364014}
23-04-17 14:22:48.378 - INFO: Training Metrics: {"loss_text_ce": 2.7464804649353027, "loss_mel_ce": 1.071955919265747, "loss_gpt_total": 1.0994207859039307, "lr": 3.90625e-08, "it": 2680, "step": 10, "steps": 10, "epoch": 261, "iteration_rate": 4.919164180755615}
23-04-17 14:22:53.641 - INFO: Training Metrics: {"loss_text_ce": 2.7536871433258057, "loss_mel_ce": 1.0725868940353394, "loss_gpt_total": 1.100123643875122, "lr": 3.90625e-08, "it": 2681, "step": 1, "steps": 10, "epoch": 262, "iteration_rate": 5.0520946979522705}
23-04-17 14:22:58.857 - INFO: Training Metrics: {"loss_text_ce": 2.7535593509674072, "loss_mel_ce": 1.073226809501648, "loss_gpt_total": 1.1007622480392456, "lr": 3.90625e-08, "it": 2682, "step": 2, "steps": 10, "epoch": 262, "iteration_rate": 5.182878732681274}
23-04-17 14:23:04.024 - INFO: Training Metrics: {"loss_text_ce": 2.753521203994751, "loss_mel_ce": 1.0736933946609497, "loss_gpt_total": 1.1012283563613892, "lr": 3.90625e-08, "it": 2683, "step": 3, "steps": 10, "epoch": 262, "iteration_rate": 5.134616374969482}
23-04-17 14:23:09.494 - INFO: Training Metrics: {"loss_text_ce": 2.7523913383483887, "loss_mel_ce": 1.073890209197998, "loss_gpt_total": 1.1014139652252197, "lr": 3.90625e-08, "it": 2684, "step": 4, "steps": 10, "epoch": 262, "iteration_rate": 5.436810493469238}
23-04-17 14:23:14.778 - INFO: Training Metrics: {"loss_text_ce": 2.7515101432800293, "loss_mel_ce": 1.0741255283355713, "loss_gpt_total": 1.1016404628753662, "lr": 3.90625e-08, "it": 2685, "step": 5, "steps": 10, "epoch": 262, "iteration_rate": 5.251472234725952}
23-04-17 14:23:20.008 - INFO: Training Metrics: {"loss_text_ce": 2.7501771450042725, "loss_mel_ce": 1.0742355585098267, "loss_gpt_total": 1.1017372608184814, "lr": 3.90625e-08, "it": 2686, "step": 6, "steps": 10, "epoch": 262, "iteration_rate": 5.1974146366119385}
23-04-17 14:23:24.928 - INFO: Training Metrics: {"loss_text_ce": 2.749573230743408, "loss_mel_ce": 1.074187994003296, "loss_gpt_total": 1.101683497428894, "lr": 3.90625e-08, "it": 2687, "step": 7, "steps": 10, "epoch": 262, "iteration_rate": 4.886929035186768}
23-04-17 14:23:30.395 - INFO: Training Metrics: {"loss_text_ce": 2.748560905456543, "loss_mel_ce": 1.074157953262329, "loss_gpt_total": 1.1016433238983154, "lr": 3.90625e-08, "it": 2688, "step": 8, "steps": 10, "epoch": 262, "iteration_rate": 5.433473110198975}
23-04-17 14:23:35.429 - INFO: Training Metrics: {"loss_text_ce": 2.7480955123901367, "loss_mel_ce": 1.0739604234695435, "loss_gpt_total": 1.1014412641525269, "lr": 3.90625e-08, "it": 2689, "step": 9, "steps": 10, "epoch": 262, "iteration_rate": 5.000994443893433}
23-04-17 14:23:40.580 - INFO: Training Metrics: {"loss_text_ce": 2.7479584217071533, "loss_mel_ce": 1.0738221406936646, "loss_gpt_total": 1.1013015508651733, "lr": 3.90625e-08, "it": 2690, "step": 10, "steps": 10, "epoch": 262, "iteration_rate": 5.11034893989563}
23-04-17 14:23:46.231 - INFO: Training Metrics: {"loss_text_ce": 2.7463362216949463, "loss_mel_ce": 1.0720181465148926, "loss_gpt_total": 1.0994817018508911, "lr": 3.90625e-08, "it": 2691, "step": 1, "steps": 10, "epoch": 263, "iteration_rate": 5.425989627838135}
23-04-17 14:23:51.638 - INFO: Training Metrics: {"loss_text_ce": 2.7436580657958984, "loss_mel_ce": 1.0718204975128174, "loss_gpt_total": 1.099257230758667, "lr": 3.90625e-08, "it": 2692, "step": 2, "steps": 10, "epoch": 263, "iteration_rate": 5.373342037200928}
23-04-17 14:23:57.006 - INFO: Training Metrics: {"loss_text_ce": 2.7432658672332764, "loss_mel_ce": 1.0723540782928467, "loss_gpt_total": 1.0997868776321411, "lr": 3.90625e-08, "it": 2693, "step": 3, "steps": 10, "epoch": 263, "iteration_rate": 5.335202217102051}
23-04-17 14:24:02.322 - INFO: Training Metrics: {"loss_text_ce": 2.7418079376220703, "loss_mel_ce": 1.0724225044250488, "loss_gpt_total": 1.0998406410217285, "lr": 3.90625e-08, "it": 2694, "step": 4, "steps": 10, "epoch": 263, "iteration_rate": 5.282503366470337}
23-04-17 14:24:07.384 - INFO: Training Metrics: {"loss_text_ce": 2.7406973838806152, "loss_mel_ce": 1.072425365447998, "loss_gpt_total": 1.0998324155807495, "lr": 3.90625e-08, "it": 2695, "step": 5, "steps": 10, "epoch": 263, "iteration_rate": 5.029027223587036}
23-04-17 14:24:12.647 - INFO: Training Metrics: {"loss_text_ce": 2.7396488189697266, "loss_mel_ce": 1.0720161199569702, "loss_gpt_total": 1.0994126796722412, "lr": 3.90625e-08, "it": 2696, "step": 6, "steps": 10, "epoch": 263, "iteration_rate": 5.226016998291016}
23-04-17 14:24:18.329 - INFO: Training Metrics: {"loss_text_ce": 2.7390122413635254, "loss_mel_ce": 1.0719932317733765, "loss_gpt_total": 1.0993834733963013, "lr": 3.90625e-08, "it": 2697, "step": 7, "steps": 10, "epoch": 263, "iteration_rate": 5.645751237869263}
23-04-17 14:24:23.753 - INFO: Training Metrics: {"loss_text_ce": 2.7389729022979736, "loss_mel_ce": 1.0720429420471191, "loss_gpt_total": 1.0994327068328857, "lr": 3.90625e-08, "it": 2698, "step": 8, "steps": 10, "epoch": 263, "iteration_rate": 5.388461589813232}
23-04-17 14:24:29.223 - INFO: Training Metrics: {"loss_text_ce": 2.739339828491211, "loss_mel_ce": 1.0720254182815552, "loss_gpt_total": 1.0994187593460083, "lr": 3.90625e-08, "it": 2699, "step": 9, "steps": 10, "epoch": 263, "iteration_rate": 5.43429160118103}
23-04-17 14:24:39.381 - INFO: Saving models and training states.
23-04-17 14:24:39.387 - INFO: Training Metrics: {"loss_text_ce": 2.739698886871338, "loss_mel_ce": 1.0721077919006348, "loss_gpt_total": 1.0995047092437744, "lr": 3.90625e-08, "it": 2700, "step": 10, "steps": 10, "epoch": 263, "iteration_rate": 5.63767671585083}
23-04-17 14:24:44.927 - INFO: Training Metrics: {"loss_text_ce": 2.743736982345581, "loss_mel_ce": 1.0710986852645874, "loss_gpt_total": 1.0985360145568848, "lr": 3.90625e-08, "it": 2701, "step": 1, "steps": 10, "epoch": 264, "iteration_rate": 5.329461097717285}