23-04-17 13:49:29.510 - INFO: name: louise
  model: extensibletrainer
  scale: 1
  gpu_ids: [0]
  start_step: 0
  checkpointing_enabled: True
  fp16: True
  bitsandbytes: True
  gpus: 1
  datasets:[
    train:[
      name: training
      n_workers: 2
      batch_size: 28
      mode: paired_voice_audio
      path: ./training/louise/train.txt
      fetcher_mode: ['lj']
      phase: train
      max_wav_length: 255995
      max_text_length: 200
      sample_rate: 22050
      load_conditioning: True
      num_conditioning_candidates: 2
      conditioning_length: 44000
      use_bpe_tokenizer: True
      tokenizer_vocab: ./modules/tortoise-tts/tortoise/data/tokenizer.json
      load_aligned_codes: False
      data_type: img
    ]
    val:[
      name: validation
      n_workers: 2
      batch_size: 7
      mode: paired_voice_audio
      path: ./training/louise/validation.txt
      fetcher_mode: ['lj']
      phase: val
      max_wav_length: 255995
      max_text_length: 200
      sample_rate: 22050
      load_conditioning: True
      num_conditioning_candidates: 2
      conditioning_length: 44000
      use_bpe_tokenizer: True
      tokenizer_vocab: ./modules/tortoise-tts/tortoise/data/tokenizer.json
      load_aligned_codes: False
      data_type: img
    ]
  ]
  steps:[
    gpt_train:[
      training: gpt
      loss_log_buffer: 500
      optimizer: adamw
      optimizer_params:[
        lr: 1e-05
        weight_decay: 0.01
        beta1: 0.9
        beta2: 0.96
      ]
      clip_grad_eps: 4
      injectors:[
        paired_to_mel:[
          type: torch_mel_spectrogram
          mel_norm_file: ./modules/tortoise-tts/tortoise/data/mel_norms.pth
          in: wav
          out: paired_mel
        ]
        paired_cond_to_mel:[
          type: for_each
          subtype: torch_mel_spectrogram
          mel_norm_file: ./modules/tortoise-tts/tortoise/data/mel_norms.pth
          in: conditioning
          out: paired_conditioning_mel
        ]
        to_codes:[
          type: discrete_token
          in: paired_mel
          out: paired_mel_codes
          dvae_config: ./models/tortoise/train_diffusion_vocoder_22k_level.yml
        ]
        paired_fwd_text:[
          type: generator
          generator: gpt
          in: ['paired_conditioning_mel', 'padded_text', 'text_lengths', 'paired_mel_codes', 'wav_lengths']
          out: ['loss_text_ce', 'loss_mel_ce', 'logits']
        ]
      ]
      losses:[
        text_ce:[
          type: direct
          weight: 0.01
          key: loss_text_ce
        ]
        mel_ce:[
          type: direct
          weight: 1
          key: loss_mel_ce
        ]
      ]
    ]
  ]
  networks:[
    gpt:[
      type: generator
      which_model_G: unified_voice2
      kwargs:[
        layers: 30
        model_dim: 1024
        heads: 16
        max_text_tokens: 402
        max_mel_tokens: 604
        max_conditioning_inputs: 2
        mel_length_compression: 1024
        number_text_tokens: 256
        number_mel_codes: 8194
        start_mel_token: 8192
        stop_mel_token: 8193
        start_text_token: 255
        train_solo_embeddings: False
        use_mel_codes_as_input: True
        checkpointing: True
        tortoise_compat: True
      ]
    ]
  ]
  path:[
    strict_load: True
    resume_state: ./training/louise/finetune/training_state//2400.state
    root: ./
    experiments_root: ./training/louise/finetune
    models: ./training/louise/finetune/models
    training_state: ./training/louise/finetune/training_state
    log: ./training/louise/finetune
    val_images: ./training/louise/finetune/val_images
  ]
  train:[
    niter: 2750
    warmup_iter: -1
    mega_batch_factor: 4
    val_freq: 100
    ema_enabled: False
    default_lr_scheme: MultiStepLR
    gen_lr_steps: [2, 4, 9, 18, 25, 33, 50, 59]
    lr_gamma: 0.5
  ]
  eval:[
    pure: False
    output_state: gen
  ]
  logger:[
    save_checkpoint_freq: 100
    visuals: ['gen', 'mel']
    visual_debug_rate: 1100
    is_mel_spectrogram: True
  ]
  is_train: True
  dist: False
23-04-17 13:49:29.510 - INFO: Set model [gpt] to ./training/louise/finetune/models/2400_gpt.pth
23-04-17 13:49:29.510 - INFO: Random seed: 5264
23-04-17 13:49:30.226 - INFO: Number of training data elements: 293, iters: 11
23-04-17 13:49:30.227 - INFO: Total epochs needed: 250 for iters 2,750
23-04-17 13:49:37.543 - INFO: Loading model for [./training/louise/finetune/models/2400_gpt.pth]
23-04-17 13:49:41.954 - INFO: Resuming training from epoch: 236,
iter: 2400. 23-04-17 13:49:42.009 - INFO: Start training from epoch: 236, iter: 2400 23-04-17 13:49:50.649 - INFO: Training Metrics: {"loss_text_ce": 3.4746131896972656, "loss_mel_ce": 1.2423263788223267, "loss_gpt_total": 1.277072548866272, "lr": 3.90625e-08, "it": 2401, "step": 1, "steps": 10, "epoch": 236, "iteration_rate": 8.207978010177612} 23-04-17 13:49:55.846 - INFO: Training Metrics: {"loss_text_ce": 3.24749493598938, "loss_mel_ce": 1.1895654201507568, "loss_gpt_total": 1.2220404148101807, "lr": 3.90625e-08, "it": 2402, "step": 2, "steps": 10, "epoch": 236, "iteration_rate": 5.163238048553467} 23-04-17 13:50:01.056 - INFO: Training Metrics: {"loss_text_ce": 3.175556182861328, "loss_mel_ce": 1.14871084690094, "loss_gpt_total": 1.1804664134979248, "lr": 3.90625e-08, "it": 2403, "step": 3, "steps": 10, "epoch": 236, "iteration_rate": 5.178141355514526} 23-04-17 13:50:05.977 - INFO: Training Metrics: {"loss_text_ce": 3.136194944381714, "loss_mel_ce": 1.1244022846221924, "loss_gpt_total": 1.1557642221450806, "lr": 3.90625e-08, "it": 2404, "step": 4, "steps": 10, "epoch": 236, "iteration_rate": 4.889042854309082} 23-04-17 13:50:11.308 - INFO: Training Metrics: {"loss_text_ce": 3.0912458896636963, "loss_mel_ce": 1.1149165630340576, "loss_gpt_total": 1.1458289623260498, "lr": 3.90625e-08, "it": 2405, "step": 5, "steps": 10, "epoch": 236, "iteration_rate": 5.2988622188568115} 23-04-17 13:50:16.548 - INFO: Training Metrics: {"loss_text_ce": 3.0554256439208984, "loss_mel_ce": 1.1106914281845093, "loss_gpt_total": 1.141245722770691, "lr": 3.90625e-08, "it": 2406, "step": 6, "steps": 10, "epoch": 236, "iteration_rate": 5.208253383636475} 23-04-17 13:50:21.833 - INFO: Training Metrics: {"loss_text_ce": 3.025360107421875, "loss_mel_ce": 1.1108816862106323, "loss_gpt_total": 1.141135334968567, "lr": 3.90625e-08, "it": 2407, "step": 7, "steps": 10, "epoch": 236, "iteration_rate": 5.25328803062439} 23-04-17 13:50:26.932 - INFO: Training Metrics: {"loss_text_ce": 3.0010077953338623, "loss_mel_ce": 1.11048424243927, "loss_gpt_total": 1.1404943466186523, "lr": 3.90625e-08, "it": 2408, "step": 8, "steps": 10, "epoch": 236, "iteration_rate": 5.067497491836548} 23-04-17 13:50:32.128 - INFO: Training Metrics: {"loss_text_ce": 2.9782392978668213, "loss_mel_ce": 1.107712984085083, "loss_gpt_total": 1.1374953985214233, "lr": 3.90625e-08, "it": 2409, "step": 9, "steps": 10, "epoch": 236, "iteration_rate": 5.162326097488403} 23-04-17 13:50:37.356 - INFO: Training Metrics: {"loss_text_ce": 2.958953619003296, "loss_mel_ce": 1.1058038473129272, "loss_gpt_total": 1.135393500328064, "lr": 3.90625e-08, "it": 2410, "step": 10, "steps": 10, "epoch": 236, "iteration_rate": 5.195818662643433} 23-04-17 13:50:42.671 - INFO: Training Metrics: {"loss_text_ce": 2.7755837440490723, "loss_mel_ce": 1.0835926532745361, "loss_gpt_total": 1.1113486289978027, "lr": 3.90625e-08, "it": 2411, "step": 1, "steps": 10, "epoch": 237, "iteration_rate": 5.11469030380249} 23-04-17 13:50:47.740 - INFO: Training Metrics: {"loss_text_ce": 2.794421672821045, "loss_mel_ce": 1.089477300643921, "loss_gpt_total": 1.1174216270446777, "lr": 3.90625e-08, "it": 2412, "step": 2, "steps": 10, "epoch": 237, "iteration_rate": 5.03676438331604} 23-04-17 13:50:52.765 - INFO: Training Metrics: {"loss_text_ce": 2.809199571609497, "loss_mel_ce": 1.0874309539794922, "loss_gpt_total": 1.1155229806900024, "lr": 3.90625e-08, "it": 2413, "step": 3, "steps": 10, "epoch": 237, "iteration_rate": 4.992449045181274} 23-04-17 13:50:57.795 - INFO: Training Metrics: 
{"loss_text_ce": 2.822309970855713, "loss_mel_ce": 1.0855480432510376, "loss_gpt_total": 1.1137712001800537, "lr": 3.90625e-08, "it": 2414, "step": 4, "steps": 10, "epoch": 237, "iteration_rate": 4.995181083679199} 23-04-17 13:51:03.239 - INFO: Training Metrics: {"loss_text_ce": 2.82942795753479, "loss_mel_ce": 1.0864624977111816, "loss_gpt_total": 1.1147568225860596, "lr": 3.90625e-08, "it": 2415, "step": 5, "steps": 10, "epoch": 237, "iteration_rate": 5.411502122879028} 23-04-17 13:51:08.304 - INFO: Training Metrics: {"loss_text_ce": 2.83489990234375, "loss_mel_ce": 1.086269497871399, "loss_gpt_total": 1.1146186590194702, "lr": 3.90625e-08, "it": 2416, "step": 6, "steps": 10, "epoch": 237, "iteration_rate": 5.0323567390441895} 23-04-17 13:51:13.550 - INFO: Training Metrics: {"loss_text_ce": 2.834895610809326, "loss_mel_ce": 1.0858455896377563, "loss_gpt_total": 1.114194631576538, "lr": 3.90625e-08, "it": 2417, "step": 7, "steps": 10, "epoch": 237, "iteration_rate": 5.214161396026611} 23-04-17 13:51:19.087 - INFO: Training Metrics: {"loss_text_ce": 2.8335671424865723, "loss_mel_ce": 1.0858969688415527, "loss_gpt_total": 1.1142326593399048, "lr": 3.90625e-08, "it": 2418, "step": 8, "steps": 10, "epoch": 237, "iteration_rate": 5.503908395767212} 23-04-17 13:51:24.225 - INFO: Training Metrics: {"loss_text_ce": 2.8308513164520264, "loss_mel_ce": 1.0855616331100464, "loss_gpt_total": 1.1138701438903809, "lr": 3.90625e-08, "it": 2419, "step": 9, "steps": 10, "epoch": 237, "iteration_rate": 5.105556964874268} 23-04-17 13:51:29.465 - INFO: Training Metrics: {"loss_text_ce": 2.8268299102783203, "loss_mel_ce": 1.085270643234253, "loss_gpt_total": 1.1135388612747192, "lr": 3.90625e-08, "it": 2420, "step": 10, "steps": 10, "epoch": 237, "iteration_rate": 5.208269119262695} 23-04-17 13:51:34.553 - INFO: Training Metrics: {"loss_text_ce": 2.816218852996826, "loss_mel_ce": 1.08180570602417, "loss_gpt_total": 1.109967827796936, "lr": 3.90625e-08, "it": 2421, "step": 1, "steps": 10, "epoch": 238, "iteration_rate": 4.868499279022217} 23-04-17 13:51:39.930 - INFO: Training Metrics: {"loss_text_ce": 2.8100357055664062, "loss_mel_ce": 1.0812129974365234, "loss_gpt_total": 1.1093133687973022, "lr": 3.90625e-08, "it": 2422, "step": 2, "steps": 10, "epoch": 238, "iteration_rate": 5.341215372085571} 23-04-17 13:51:45.503 - INFO: Training Metrics: {"loss_text_ce": 2.8005330562591553, "loss_mel_ce": 1.081521987915039, "loss_gpt_total": 1.1095272302627563, "lr": 3.90625e-08, "it": 2423, "step": 3, "steps": 10, "epoch": 238, "iteration_rate": 5.539278745651245} 23-04-17 13:51:51.112 - INFO: Training Metrics: {"loss_text_ce": 2.797447919845581, "loss_mel_ce": 1.0858393907546997, "loss_gpt_total": 1.1138137578964233, "lr": 3.90625e-08, "it": 2424, "step": 4, "steps": 10, "epoch": 238, "iteration_rate": 5.574002027511597} 23-04-17 13:51:56.425 - INFO: Training Metrics: {"loss_text_ce": 2.7952959537506104, "loss_mel_ce": 1.0881774425506592, "loss_gpt_total": 1.1161302328109741, "lr": 3.90625e-08, "it": 2425, "step": 5, "steps": 10, "epoch": 238, "iteration_rate": 5.280324697494507} 23-04-17 13:52:01.703 - INFO: Training Metrics: {"loss_text_ce": 2.792778730392456, "loss_mel_ce": 1.089535117149353, "loss_gpt_total": 1.1174627542495728, "lr": 3.90625e-08, "it": 2426, "step": 6, "steps": 10, "epoch": 238, "iteration_rate": 5.243861675262451} 23-04-17 13:52:06.889 - INFO: Training Metrics: {"loss_text_ce": 2.7921149730682373, "loss_mel_ce": 1.0904239416122437, "loss_gpt_total": 1.1183449029922485, "lr": 3.90625e-08, "it": 
2427, "step": 7, "steps": 10, "epoch": 238, "iteration_rate": 5.153138875961304} 23-04-17 13:52:11.772 - INFO: Training Metrics: {"loss_text_ce": 2.79276967048645, "loss_mel_ce": 1.0901685953140259, "loss_gpt_total": 1.1180962324142456, "lr": 3.90625e-08, "it": 2428, "step": 8, "steps": 10, "epoch": 238, "iteration_rate": 4.851017236709595} 23-04-17 13:52:17.041 - INFO: Training Metrics: {"loss_text_ce": 2.7913646697998047, "loss_mel_ce": 1.0898327827453613, "loss_gpt_total": 1.1177464723587036, "lr": 3.90625e-08, "it": 2429, "step": 9, "steps": 10, "epoch": 238, "iteration_rate": 5.236605167388916} 23-04-17 13:52:22.310 - INFO: Training Metrics: {"loss_text_ce": 2.7895336151123047, "loss_mel_ce": 1.0893785953521729, "loss_gpt_total": 1.1172738075256348, "lr": 3.90625e-08, "it": 2430, "step": 10, "steps": 10, "epoch": 238, "iteration_rate": 5.236731052398682} 23-04-17 13:52:27.550 - INFO: Training Metrics: {"loss_text_ce": 2.7874391078948975, "loss_mel_ce": 1.0837223529815674, "loss_gpt_total": 1.111596703529358, "lr": 3.90625e-08, "it": 2431, "step": 1, "steps": 10, "epoch": 239, "iteration_rate": 5.052397012710571} 23-04-17 13:52:32.848 - INFO: Training Metrics: {"loss_text_ce": 2.786409616470337, "loss_mel_ce": 1.0857782363891602, "loss_gpt_total": 1.1136422157287598, "lr": 3.90625e-08, "it": 2432, "step": 2, "steps": 10, "epoch": 239, "iteration_rate": 5.265899181365967} 23-04-17 13:52:37.839 - INFO: Training Metrics: {"loss_text_ce": 2.7874820232391357, "loss_mel_ce": 1.0847398042678833, "loss_gpt_total": 1.112614631652832, "lr": 3.90625e-08, "it": 2433, "step": 3, "steps": 10, "epoch": 239, "iteration_rate": 4.9579102993011475} 23-04-17 13:52:42.813 - INFO: Training Metrics: {"loss_text_ce": 2.7888998985290527, "loss_mel_ce": 1.083701491355896, "loss_gpt_total": 1.1115903854370117, "lr": 3.90625e-08, "it": 2434, "step": 4, "steps": 10, "epoch": 239, "iteration_rate": 4.9409966468811035} 23-04-17 13:52:47.852 - INFO: Training Metrics: {"loss_text_ce": 2.788721799850464, "loss_mel_ce": 1.082120418548584, "loss_gpt_total": 1.110007643699646, "lr": 3.90625e-08, "it": 2435, "step": 5, "steps": 10, "epoch": 239, "iteration_rate": 5.003685235977173} 23-04-17 13:52:53.172 - INFO: Training Metrics: {"loss_text_ce": 2.78889536857605, "loss_mel_ce": 1.0810743570327759, "loss_gpt_total": 1.1089633703231812, "lr": 3.90625e-08, "it": 2436, "step": 6, "steps": 10, "epoch": 239, "iteration_rate": 5.285303831100464} 23-04-17 13:52:58.502 - INFO: Training Metrics: {"loss_text_ce": 2.788128614425659, "loss_mel_ce": 1.0803388357162476, "loss_gpt_total": 1.108220100402832, "lr": 3.90625e-08, "it": 2437, "step": 7, "steps": 10, "epoch": 239, "iteration_rate": 5.296780824661255} 23-04-17 13:53:04.004 - INFO: Training Metrics: {"loss_text_ce": 2.785921096801758, "loss_mel_ce": 1.0799109935760498, "loss_gpt_total": 1.1077700853347778, "lr": 3.90625e-08, "it": 2438, "step": 8, "steps": 10, "epoch": 239, "iteration_rate": 5.470376014709473} 23-04-17 13:53:09.375 - INFO: Training Metrics: {"loss_text_ce": 2.7852134704589844, "loss_mel_ce": 1.0806052684783936, "loss_gpt_total": 1.1084574460983276, "lr": 3.90625e-08, "it": 2439, "step": 9, "steps": 10, "epoch": 239, "iteration_rate": 5.3386523723602295} 23-04-17 13:53:14.589 - INFO: Training Metrics: {"loss_text_ce": 2.7854652404785156, "loss_mel_ce": 1.0808076858520508, "loss_gpt_total": 1.1086622476577759, "lr": 3.90625e-08, "it": 2440, "step": 10, "steps": 10, "epoch": 239, "iteration_rate": 5.179132461547852} 23-04-17 13:53:20.490 - INFO: Training Metrics: 
{"loss_text_ce": 2.781421184539795, "loss_mel_ce": 1.0885652303695679, "loss_gpt_total": 1.1163793802261353, "lr": 3.90625e-08, "it": 2441, "step": 1, "steps": 10, "epoch": 240, "iteration_rate": 5.656210660934448} 23-04-17 13:53:25.913 - INFO: Training Metrics: {"loss_text_ce": 2.7778480052948, "loss_mel_ce": 1.0884969234466553, "loss_gpt_total": 1.1162753105163574, "lr": 3.90625e-08, "it": 2442, "step": 2, "steps": 10, "epoch": 240, "iteration_rate": 5.3907201290130615} 23-04-17 13:53:31.229 - INFO: Training Metrics: {"loss_text_ce": 2.775604248046875, "loss_mel_ce": 1.0878742933273315, "loss_gpt_total": 1.1156302690505981, "lr": 3.90625e-08, "it": 2443, "step": 3, "steps": 10, "epoch": 240, "iteration_rate": 5.277255058288574} 23-04-17 13:53:36.539 - INFO: Training Metrics: {"loss_text_ce": 2.7758569717407227, "loss_mel_ce": 1.0868442058563232, "loss_gpt_total": 1.11460280418396, "lr": 3.90625e-08, "it": 2444, "step": 4, "steps": 10, "epoch": 240, "iteration_rate": 5.274013519287109} 23-04-17 13:53:41.798 - INFO: Training Metrics: {"loss_text_ce": 2.775094509124756, "loss_mel_ce": 1.0857675075531006, "loss_gpt_total": 1.1135185956954956, "lr": 3.90625e-08, "it": 2445, "step": 5, "steps": 10, "epoch": 240, "iteration_rate": 5.221697568893433} 23-04-17 13:53:46.741 - INFO: Training Metrics: {"loss_text_ce": 2.7750418186187744, "loss_mel_ce": 1.0847309827804565, "loss_gpt_total": 1.1124814748764038, "lr": 3.90625e-08, "it": 2446, "step": 6, "steps": 10, "epoch": 240, "iteration_rate": 4.91075873374939} 23-04-17 13:53:52.113 - INFO: Training Metrics: {"loss_text_ce": 2.77423357963562, "loss_mel_ce": 1.0838522911071777, "loss_gpt_total": 1.111594796180725, "lr": 3.90625e-08, "it": 2447, "step": 7, "steps": 10, "epoch": 240, "iteration_rate": 5.339819431304932} 23-04-17 13:53:57.472 - INFO: Training Metrics: {"loss_text_ce": 2.77323317527771, "loss_mel_ce": 1.0833239555358887, "loss_gpt_total": 1.1110563278198242, "lr": 3.90625e-08, "it": 2448, "step": 8, "steps": 10, "epoch": 240, "iteration_rate": 5.325524568557739} 23-04-17 13:54:02.690 - INFO: Training Metrics: {"loss_text_ce": 2.7734174728393555, "loss_mel_ce": 1.0826973915100098, "loss_gpt_total": 1.1104316711425781, "lr": 3.90625e-08, "it": 2449, "step": 9, "steps": 10, "epoch": 240, "iteration_rate": 5.183857440948486} 23-04-17 13:54:07.911 - INFO: Training Metrics: {"loss_text_ce": 2.7743046283721924, "loss_mel_ce": 1.0823348760604858, "loss_gpt_total": 1.1100780963897705, "lr": 3.90625e-08, "it": 2450, "step": 10, "steps": 10, "epoch": 240, "iteration_rate": 5.185778617858887} 23-04-17 13:54:13.453 - INFO: Training Metrics: {"loss_text_ce": 2.788823127746582, "loss_mel_ce": 1.079003095626831, "loss_gpt_total": 1.106891393661499, "lr": 3.90625e-08, "it": 2451, "step": 1, "steps": 10, "epoch": 241, "iteration_rate": 5.322179079055786} 23-04-17 13:54:18.681 - INFO: Training Metrics: {"loss_text_ce": 2.786418914794922, "loss_mel_ce": 1.0771527290344238, "loss_gpt_total": 1.1050169467926025, "lr": 3.90625e-08, "it": 2452, "step": 2, "steps": 10, "epoch": 241, "iteration_rate": 5.19692325592041} 23-04-17 13:54:24.131 - INFO: Training Metrics: {"loss_text_ce": 2.7870852947235107, "loss_mel_ce": 1.0770740509033203, "loss_gpt_total": 1.1049448251724243, "lr": 3.90625e-08, "it": 2453, "step": 3, "steps": 10, "epoch": 241, "iteration_rate": 5.418029546737671} 23-04-17 13:54:29.568 - INFO: Training Metrics: {"loss_text_ce": 2.786604881286621, "loss_mel_ce": 1.0768581628799438, "loss_gpt_total": 1.1047241687774658, "lr": 3.90625e-08, "it": 2454, 
"step": 4, "steps": 10, "epoch": 241, "iteration_rate": 5.4017558097839355} 23-04-17 13:54:35.078 - INFO: Training Metrics: {"loss_text_ce": 2.785581588745117, "loss_mel_ce": 1.0763394832611084, "loss_gpt_total": 1.104195237159729, "lr": 3.90625e-08, "it": 2455, "step": 5, "steps": 10, "epoch": 241, "iteration_rate": 5.476755142211914} 23-04-17 13:54:40.624 - INFO: Training Metrics: {"loss_text_ce": 2.7839138507843018, "loss_mel_ce": 1.0762677192687988, "loss_gpt_total": 1.1041067838668823, "lr": 3.90625e-08, "it": 2456, "step": 6, "steps": 10, "epoch": 241, "iteration_rate": 5.511096239089966} 23-04-17 13:54:46.219 - INFO: Training Metrics: {"loss_text_ce": 2.78204607963562, "loss_mel_ce": 1.076330542564392, "loss_gpt_total": 1.1041510105133057, "lr": 3.90625e-08, "it": 2457, "step": 7, "steps": 10, "epoch": 241, "iteration_rate": 5.562684535980225} 23-04-17 13:54:51.205 - INFO: Training Metrics: {"loss_text_ce": 2.7814316749572754, "loss_mel_ce": 1.0762310028076172, "loss_gpt_total": 1.1040453910827637, "lr": 3.90625e-08, "it": 2458, "step": 8, "steps": 10, "epoch": 241, "iteration_rate": 4.953611373901367} 23-04-17 13:54:56.612 - INFO: Training Metrics: {"loss_text_ce": 2.7806501388549805, "loss_mel_ce": 1.0763027667999268, "loss_gpt_total": 1.104109287261963, "lr": 3.90625e-08, "it": 2459, "step": 9, "steps": 10, "epoch": 241, "iteration_rate": 5.37419581413269} 23-04-17 13:55:02.350 - INFO: Training Metrics: {"loss_text_ce": 2.780153751373291, "loss_mel_ce": 1.0763309001922607, "loss_gpt_total": 1.1041324138641357, "lr": 3.90625e-08, "it": 2460, "step": 10, "steps": 10, "epoch": 241, "iteration_rate": 5.702569246292114} 23-04-17 13:55:07.924 - INFO: Training Metrics: {"loss_text_ce": 2.774369716644287, "loss_mel_ce": 1.0775442123413086, "loss_gpt_total": 1.1052879095077515, "lr": 3.90625e-08, "it": 2461, "step": 1, "steps": 10, "epoch": 242, "iteration_rate": 5.359435796737671} 23-04-17 13:55:13.033 - INFO: Training Metrics: {"loss_text_ce": 2.773343086242676, "loss_mel_ce": 1.0762990713119507, "loss_gpt_total": 1.1040325164794922, "lr": 3.90625e-08, "it": 2462, "step": 2, "steps": 10, "epoch": 242, "iteration_rate": 5.076251029968262} 23-04-17 13:55:18.267 - INFO: Training Metrics: {"loss_text_ce": 2.7710952758789062, "loss_mel_ce": 1.0753687620162964, "loss_gpt_total": 1.1030796766281128, "lr": 3.90625e-08, "it": 2463, "step": 3, "steps": 10, "epoch": 242, "iteration_rate": 5.20340371131897} 23-04-17 13:55:23.480 - INFO: Training Metrics: {"loss_text_ce": 2.7697482109069824, "loss_mel_ce": 1.0746878385543823, "loss_gpt_total": 1.1023852825164795, "lr": 3.90625e-08, "it": 2464, "step": 4, "steps": 10, "epoch": 242, "iteration_rate": 5.185347080230713} 23-04-17 13:55:28.742 - INFO: Training Metrics: {"loss_text_ce": 2.7692227363586426, "loss_mel_ce": 1.074220895767212, "loss_gpt_total": 1.1019132137298584, "lr": 3.90625e-08, "it": 2465, "step": 5, "steps": 10, "epoch": 242, "iteration_rate": 5.229465007781982} 23-04-17 13:55:33.993 - INFO: Training Metrics: {"loss_text_ce": 2.7691900730133057, "loss_mel_ce": 1.073663592338562, "loss_gpt_total": 1.1013554334640503, "lr": 3.90625e-08, "it": 2466, "step": 6, "steps": 10, "epoch": 242, "iteration_rate": 5.218896150588989} 23-04-17 13:55:39.542 - INFO: Training Metrics: {"loss_text_ce": 2.768052577972412, "loss_mel_ce": 1.073493242263794, "loss_gpt_total": 1.1011736392974854, "lr": 3.90625e-08, "it": 2467, "step": 7, "steps": 10, "epoch": 242, "iteration_rate": 5.517076015472412} 23-04-17 13:55:44.971 - INFO: Training Metrics: 
{"loss_text_ce": 2.7668919563293457, "loss_mel_ce": 1.07363760471344, "loss_gpt_total": 1.1013065576553345, "lr": 3.90625e-08, "it": 2468, "step": 8, "steps": 10, "epoch": 242, "iteration_rate": 5.393611907958984} 23-04-17 13:55:50.453 - INFO: Training Metrics: {"loss_text_ce": 2.765810966491699, "loss_mel_ce": 1.0739431381225586, "loss_gpt_total": 1.101601243019104, "lr": 3.90625e-08, "it": 2469, "step": 9, "steps": 10, "epoch": 242, "iteration_rate": 5.450122833251953} 23-04-17 13:55:55.686 - INFO: Training Metrics: {"loss_text_ce": 2.7655348777770996, "loss_mel_ce": 1.0741784572601318, "loss_gpt_total": 1.1018340587615967, "lr": 3.90625e-08, "it": 2470, "step": 10, "steps": 10, "epoch": 242, "iteration_rate": 5.200361728668213} 23-04-17 13:56:01.260 - INFO: Training Metrics: {"loss_text_ce": 2.7625539302825928, "loss_mel_ce": 1.0794192552566528, "loss_gpt_total": 1.1070446968078613, "lr": 3.90625e-08, "it": 2471, "step": 1, "steps": 10, "epoch": 243, "iteration_rate": 5.355469465255737} 23-04-17 13:56:06.714 - INFO: Training Metrics: {"loss_text_ce": 2.7626640796661377, "loss_mel_ce": 1.0795437097549438, "loss_gpt_total": 1.1071703433990479, "lr": 3.90625e-08, "it": 2472, "step": 2, "steps": 10, "epoch": 243, "iteration_rate": 5.421551704406738} 23-04-17 13:56:11.783 - INFO: Training Metrics: {"loss_text_ce": 2.764070749282837, "loss_mel_ce": 1.0787067413330078, "loss_gpt_total": 1.1063474416732788, "lr": 3.90625e-08, "it": 2473, "step": 3, "steps": 10, "epoch": 243, "iteration_rate": 5.027517557144165} 23-04-17 13:56:17.002 - INFO: Training Metrics: {"loss_text_ce": 2.7648508548736572, "loss_mel_ce": 1.0781596899032593, "loss_gpt_total": 1.1058082580566406, "lr": 3.90625e-08, "it": 2474, "step": 4, "steps": 10, "epoch": 243, "iteration_rate": 5.18635630607605} 23-04-17 13:56:22.785 - INFO: Training Metrics: {"loss_text_ce": 2.764822483062744, "loss_mel_ce": 1.0780466794967651, "loss_gpt_total": 1.1056948900222778, "lr": 3.90625e-08, "it": 2475, "step": 5, "steps": 10, "epoch": 243, "iteration_rate": 5.75027871131897} 23-04-17 13:56:28.103 - INFO: Training Metrics: {"loss_text_ce": 2.7640793323516846, "loss_mel_ce": 1.0778405666351318, "loss_gpt_total": 1.1054813861846924, "lr": 3.90625e-08, "it": 2476, "step": 6, "steps": 10, "epoch": 243, "iteration_rate": 5.284085273742676} 23-04-17 13:56:33.517 - INFO: Training Metrics: {"loss_text_ce": 2.7629237174987793, "loss_mel_ce": 1.0774613618850708, "loss_gpt_total": 1.1050904989242554, "lr": 3.90625e-08, "it": 2477, "step": 7, "steps": 10, "epoch": 243, "iteration_rate": 5.377448558807373} 23-04-17 13:56:38.878 - INFO: Training Metrics: {"loss_text_ce": 2.762375593185425, "loss_mel_ce": 1.0773411989212036, "loss_gpt_total": 1.1049649715423584, "lr": 3.90625e-08, "it": 2478, "step": 8, "steps": 10, "epoch": 243, "iteration_rate": 5.326313018798828} 23-04-17 13:56:44.307 - INFO: Training Metrics: {"loss_text_ce": 2.761739730834961, "loss_mel_ce": 1.0772053003311157, "loss_gpt_total": 1.1048227548599243, "lr": 3.90625e-08, "it": 2479, "step": 9, "steps": 10, "epoch": 243, "iteration_rate": 5.394559383392334} 23-04-17 13:56:49.676 - INFO: Training Metrics: {"loss_text_ce": 2.7612814903259277, "loss_mel_ce": 1.0772254467010498, "loss_gpt_total": 1.1048381328582764, "lr": 3.90625e-08, "it": 2480, "step": 10, "steps": 10, "epoch": 243, "iteration_rate": 5.335893154144287} 23-04-17 13:56:55.135 - INFO: Training Metrics: {"loss_text_ce": 2.7556638717651367, "loss_mel_ce": 1.0775861740112305, "loss_gpt_total": 1.1051427125930786, "lr": 3.90625e-08, 
"it": 2481, "step": 1, "steps": 10, "epoch": 244, "iteration_rate": 5.236682176589966} 23-04-17 13:57:00.295 - INFO: Training Metrics: {"loss_text_ce": 2.75398588180542, "loss_mel_ce": 1.0769798755645752, "loss_gpt_total": 1.1045196056365967, "lr": 3.90625e-08, "it": 2482, "step": 2, "steps": 10, "epoch": 244, "iteration_rate": 5.122378587722778} 23-04-17 13:57:05.282 - INFO: Training Metrics: {"loss_text_ce": 2.755331039428711, "loss_mel_ce": 1.0763849020004272, "loss_gpt_total": 1.103938102722168, "lr": 3.90625e-08, "it": 2483, "step": 3, "steps": 10, "epoch": 244, "iteration_rate": 4.954476356506348} 23-04-17 13:57:10.938 - INFO: Training Metrics: {"loss_text_ce": 2.7552309036254883, "loss_mel_ce": 1.0765143632888794, "loss_gpt_total": 1.1040666103363037, "lr": 3.90625e-08, "it": 2484, "step": 4, "steps": 10, "epoch": 244, "iteration_rate": 5.623710632324219} 23-04-17 13:57:16.182 - INFO: Training Metrics: {"loss_text_ce": 2.755204677581787, "loss_mel_ce": 1.0765693187713623, "loss_gpt_total": 1.104121446609497, "lr": 3.90625e-08, "it": 2485, "step": 5, "steps": 10, "epoch": 244, "iteration_rate": 5.208737850189209} 23-04-17 13:57:21.619 - INFO: Training Metrics: {"loss_text_ce": 2.7551708221435547, "loss_mel_ce": 1.0764011144638062, "loss_gpt_total": 1.1039528846740723, "lr": 3.90625e-08, "it": 2486, "step": 6, "steps": 10, "epoch": 244, "iteration_rate": 5.404887437820435} 23-04-17 13:57:27.205 - INFO: Training Metrics: {"loss_text_ce": 2.754345178604126, "loss_mel_ce": 1.0764997005462646, "loss_gpt_total": 1.1040431261062622, "lr": 3.90625e-08, "it": 2487, "step": 7, "steps": 10, "epoch": 244, "iteration_rate": 5.553220987319946} 23-04-17 13:57:32.478 - INFO: Training Metrics: {"loss_text_ce": 2.7542877197265625, "loss_mel_ce": 1.0766524076461792, "loss_gpt_total": 1.1041953563690186, "lr": 3.90625e-08, "it": 2488, "step": 8, "steps": 10, "epoch": 244, "iteration_rate": 5.237905502319336} 23-04-17 13:57:38.043 - INFO: Training Metrics: {"loss_text_ce": 2.753890037536621, "loss_mel_ce": 1.076769471168518, "loss_gpt_total": 1.1043084859848022, "lr": 3.90625e-08, "it": 2489, "step": 9, "steps": 10, "epoch": 244, "iteration_rate": 5.529710054397583} 23-04-17 13:57:43.317 - INFO: Training Metrics: {"loss_text_ce": 2.7537217140197754, "loss_mel_ce": 1.0768731832504272, "loss_gpt_total": 1.1044104099273682, "lr": 3.90625e-08, "it": 2490, "step": 10, "steps": 10, "epoch": 244, "iteration_rate": 5.241695165634155} 23-04-17 13:57:48.953 - INFO: Training Metrics: {"loss_text_ce": 2.7533633708953857, "loss_mel_ce": 1.0790603160858154, "loss_gpt_total": 1.1065940856933594, "lr": 3.90625e-08, "it": 2491, "step": 1, "steps": 10, "epoch": 245, "iteration_rate": 5.431172847747803} 23-04-17 13:57:54.094 - INFO: Training Metrics: {"loss_text_ce": 2.7549118995666504, "loss_mel_ce": 1.0783770084381104, "loss_gpt_total": 1.1059261560440063, "lr": 3.90625e-08, "it": 2492, "step": 2, "steps": 10, "epoch": 245, "iteration_rate": 5.105635166168213} 23-04-17 13:57:59.330 - INFO: Training Metrics: {"loss_text_ce": 2.7573070526123047, "loss_mel_ce": 1.0779926776885986, "loss_gpt_total": 1.1055656671524048, "lr": 3.90625e-08, "it": 2493, "step": 3, "steps": 10, "epoch": 245, "iteration_rate": 5.201913118362427} 23-04-17 13:58:04.408 - INFO: Training Metrics: {"loss_text_ce": 2.759739875793457, "loss_mel_ce": 1.0780494213104248, "loss_gpt_total": 1.1056468486785889, "lr": 3.90625e-08, "it": 2494, "step": 4, "steps": 10, "epoch": 245, "iteration_rate": 5.0420708656311035} 23-04-17 13:58:09.692 - INFO: Training 
Metrics: {"loss_text_ce": 2.761871814727783, "loss_mel_ce": 1.0777614116668701, "loss_gpt_total": 1.1053801774978638, "lr": 3.90625e-08, "it": 2495, "step": 5, "steps": 10, "epoch": 245, "iteration_rate": 5.2462403774261475} 23-04-17 13:58:15.119 - INFO: Training Metrics: {"loss_text_ce": 2.7630550861358643, "loss_mel_ce": 1.0775516033172607, "loss_gpt_total": 1.1051822900772095, "lr": 3.90625e-08, "it": 2496, "step": 6, "steps": 10, "epoch": 245, "iteration_rate": 5.390755653381348} 23-04-17 13:58:20.609 - INFO: Training Metrics: {"loss_text_ce": 2.7643465995788574, "loss_mel_ce": 1.0777581930160522, "loss_gpt_total": 1.105401635169983, "lr": 3.90625e-08, "it": 2497, "step": 7, "steps": 10, "epoch": 245, "iteration_rate": 5.4579761028289795} 23-04-17 13:58:26.127 - INFO: Training Metrics: {"loss_text_ce": 2.764652729034424, "loss_mel_ce": 1.0779461860656738, "loss_gpt_total": 1.1055927276611328, "lr": 3.90625e-08, "it": 2498, "step": 8, "steps": 10, "epoch": 245, "iteration_rate": 5.48371434211731} 23-04-17 13:58:31.323 - INFO: Training Metrics: {"loss_text_ce": 2.7650816440582275, "loss_mel_ce": 1.0780123472213745, "loss_gpt_total": 1.1056631803512573, "lr": 3.90625e-08, "it": 2499, "step": 9, "steps": 10, "epoch": 245, "iteration_rate": 5.163449048995972} 23-04-17 13:58:40.988 - INFO: Saving models and training states. 23-04-17 13:58:41.056 - INFO: Training Metrics: {"loss_text_ce": 2.765530824661255, "loss_mel_ce": 1.0780727863311768, "loss_gpt_total": 1.1057281494140625, "lr": 3.90625e-08, "it": 2500, "step": 10, "steps": 10, "epoch": 245, "iteration_rate": 5.1202232837677} 23-04-17 13:58:48.277 - INFO: Training Metrics: {"loss_text_ce": 2.7672431468963623, "loss_mel_ce": 1.0762685537338257, "loss_gpt_total": 1.1039409637451172, "lr": 3.90625e-08, "it": 2501, "step": 1, "steps": 10, "epoch": 246, "iteration_rate": 5.404199838638306} 23-04-17 13:58:53.746 - INFO: Training Metrics: {"loss_text_ce": 2.767515182495117, "loss_mel_ce": 1.0767979621887207, "loss_gpt_total": 1.1044731140136719, "lr": 3.90625e-08, "it": 2502, "step": 2, "steps": 10, "epoch": 246, "iteration_rate": 5.431082487106323} 23-04-17 13:58:59.343 - INFO: Training Metrics: {"loss_text_ce": 2.7665042877197266, "loss_mel_ce": 1.0768465995788574, "loss_gpt_total": 1.1045116186141968, "lr": 3.90625e-08, "it": 2503, "step": 3, "steps": 10, "epoch": 246, "iteration_rate": 5.565785646438599} 23-04-17 13:59:04.436 - INFO: Training Metrics: {"loss_text_ce": 2.766180992126465, "loss_mel_ce": 1.076322078704834, "loss_gpt_total": 1.1039838790893555, "lr": 3.90625e-08, "it": 2504, "step": 4, "steps": 10, "epoch": 246, "iteration_rate": 5.056727647781372} 23-04-17 13:59:09.497 - INFO: Training Metrics: {"loss_text_ce": 2.766864776611328, "loss_mel_ce": 1.0759503841400146, "loss_gpt_total": 1.1036189794540405, "lr": 3.90625e-08, "it": 2505, "step": 5, "steps": 10, "epoch": 246, "iteration_rate": 5.028754949569702} 23-04-17 13:59:15.040 - INFO: Training Metrics: {"loss_text_ce": 2.7663328647613525, "loss_mel_ce": 1.0757168531417847, "loss_gpt_total": 1.1033800840377808, "lr": 3.90625e-08, "it": 2506, "step": 6, "steps": 10, "epoch": 246, "iteration_rate": 5.506752014160156} 23-04-17 13:59:20.561 - INFO: Training Metrics: {"loss_text_ce": 2.7659287452697754, "loss_mel_ce": 1.075865626335144, "loss_gpt_total": 1.1035250425338745, "lr": 3.90625e-08, "it": 2507, "step": 7, "steps": 10, "epoch": 246, "iteration_rate": 5.483863353729248} 23-04-17 13:59:25.957 - INFO: Training Metrics: {"loss_text_ce": 2.765368938446045, "loss_mel_ce": 
1.07598876953125, "loss_gpt_total": 1.103642463684082, "lr": 3.90625e-08, "it": 2508, "step": 8, "steps": 10, "epoch": 246, "iteration_rate": 5.362503528594971} 23-04-17 13:59:31.373 - INFO: Training Metrics: {"loss_text_ce": 2.7644641399383545, "loss_mel_ce": 1.0761440992355347, "loss_gpt_total": 1.1037887334823608, "lr": 3.90625e-08, "it": 2509, "step": 9, "steps": 10, "epoch": 246, "iteration_rate": 5.3811352252960205} 23-04-17 13:59:37.040 - INFO: Training Metrics: {"loss_text_ce": 2.7635560035705566, "loss_mel_ce": 1.0764150619506836, "loss_gpt_total": 1.1040505170822144, "lr": 3.90625e-08, "it": 2510, "step": 10, "steps": 10, "epoch": 246, "iteration_rate": 5.635450124740601} 23-04-17 13:59:42.518 - INFO: Training Metrics: {"loss_text_ce": 2.7616219520568848, "loss_mel_ce": 1.0787549018859863, "loss_gpt_total": 1.1063710451126099, "lr": 3.90625e-08, "it": 2511, "step": 1, "steps": 10, "epoch": 247, "iteration_rate": 5.269722938537598} 23-04-17 13:59:47.892 - INFO: Training Metrics: {"loss_text_ce": 2.761354923248291, "loss_mel_ce": 1.0783500671386719, "loss_gpt_total": 1.1059634685516357, "lr": 3.90625e-08, "it": 2512, "step": 2, "steps": 10, "epoch": 247, "iteration_rate": 5.34055495262146} 23-04-17 13:59:53.067 - INFO: Training Metrics: {"loss_text_ce": 2.7608137130737305, "loss_mel_ce": 1.0779255628585815, "loss_gpt_total": 1.105533480644226, "lr": 3.90625e-08, "it": 2513, "step": 3, "steps": 10, "epoch": 247, "iteration_rate": 5.142697334289551} 23-04-17 13:59:58.720 - INFO: Training Metrics: {"loss_text_ce": 2.7594833374023438, "loss_mel_ce": 1.0777710676193237, "loss_gpt_total": 1.1053658723831177, "lr": 3.90625e-08, "it": 2514, "step": 4, "steps": 10, "epoch": 247, "iteration_rate": 5.613735914230347} 23-04-17 14:00:03.881 - INFO: Training Metrics: {"loss_text_ce": 2.7591471672058105, "loss_mel_ce": 1.077399492263794, "loss_gpt_total": 1.1049909591674805, "lr": 3.90625e-08, "it": 2515, "step": 5, "steps": 10, "epoch": 247, "iteration_rate": 5.1330461502075195} 23-04-17 14:00:09.261 - INFO: Training Metrics: {"loss_text_ce": 2.7592508792877197, "loss_mel_ce": 1.0770992040634155, "loss_gpt_total": 1.104691743850708, "lr": 3.90625e-08, "it": 2516, "step": 6, "steps": 10, "epoch": 247, "iteration_rate": 5.347477674484253} 23-04-17 14:00:14.594 - INFO: Training Metrics: {"loss_text_ce": 2.760010242462158, "loss_mel_ce": 1.077028512954712, "loss_gpt_total": 1.1046284437179565, "lr": 3.90625e-08, "it": 2517, "step": 7, "steps": 10, "epoch": 247, "iteration_rate": 5.298018455505371} 23-04-17 14:00:20.425 - INFO: Training Metrics: {"loss_text_ce": 2.76029372215271, "loss_mel_ce": 1.0771474838256836, "loss_gpt_total": 1.1047502756118774, "lr": 3.90625e-08, "it": 2518, "step": 8, "steps": 10, "epoch": 247, "iteration_rate": 5.7946555614471436} 23-04-17 14:00:25.771 - INFO: Training Metrics: {"loss_text_ce": 2.7604575157165527, "loss_mel_ce": 1.0771511793136597, "loss_gpt_total": 1.1047557592391968, "lr": 3.90625e-08, "it": 2519, "step": 9, "steps": 10, "epoch": 247, "iteration_rate": 5.3116655349731445} 23-04-17 14:00:31.215 - INFO: Training Metrics: {"loss_text_ce": 2.7606871128082275, "loss_mel_ce": 1.07711660861969, "loss_gpt_total": 1.1047232151031494, "lr": 3.90625e-08, "it": 2520, "step": 10, "steps": 10, "epoch": 247, "iteration_rate": 5.408016681671143} 23-04-17 14:00:36.679 - INFO: Training Metrics: {"loss_text_ce": 2.7642455101013184, "loss_mel_ce": 1.076046109199524, "loss_gpt_total": 1.1036884784698486, "lr": 3.90625e-08, "it": 2521, "step": 1, "steps": 10, "epoch": 248, 
"iteration_rate": 5.26603627204895} 23-04-17 14:00:42.374 - INFO: Training Metrics: {"loss_text_ce": 2.7634449005126953, "loss_mel_ce": 1.076456069946289, "loss_gpt_total": 1.1040904521942139, "lr": 3.90625e-08, "it": 2522, "step": 2, "steps": 10, "epoch": 248, "iteration_rate": 5.659841060638428} 23-04-17 14:00:47.882 - INFO: Training Metrics: {"loss_text_ce": 2.7633132934570312, "loss_mel_ce": 1.0768327713012695, "loss_gpt_total": 1.1044659614562988, "lr": 3.90625e-08, "it": 2523, "step": 3, "steps": 10, "epoch": 248, "iteration_rate": 5.469837188720703} 23-04-17 14:00:53.293 - INFO: Training Metrics: {"loss_text_ce": 2.7642457485198975, "loss_mel_ce": 1.0772480964660645, "loss_gpt_total": 1.1048905849456787, "lr": 3.90625e-08, "it": 2524, "step": 4, "steps": 10, "epoch": 248, "iteration_rate": 5.377684593200684} 23-04-17 14:00:58.652 - INFO: Training Metrics: {"loss_text_ce": 2.7649178504943848, "loss_mel_ce": 1.077579140663147, "loss_gpt_total": 1.1052281856536865, "lr": 3.90625e-08, "it": 2525, "step": 5, "steps": 10, "epoch": 248, "iteration_rate": 5.326979398727417} 23-04-17 14:01:04.077 - INFO: Training Metrics: {"loss_text_ce": 2.763570785522461, "loss_mel_ce": 1.077386498451233, "loss_gpt_total": 1.1050223112106323, "lr": 3.90625e-08, "it": 2526, "step": 6, "steps": 10, "epoch": 248, "iteration_rate": 5.392202377319336} 23-04-17 14:01:09.372 - INFO: Training Metrics: {"loss_text_ce": 2.7627451419830322, "loss_mel_ce": 1.0772569179534912, "loss_gpt_total": 1.104884386062622, "lr": 3.90625e-08, "it": 2527, "step": 7, "steps": 10, "epoch": 248, "iteration_rate": 5.261451959609985} 23-04-17 14:01:14.825 - INFO: Training Metrics: {"loss_text_ce": 2.761331558227539, "loss_mel_ce": 1.0772135257720947, "loss_gpt_total": 1.104826807975769, "lr": 3.90625e-08, "it": 2528, "step": 8, "steps": 10, "epoch": 248, "iteration_rate": 5.419564247131348} 23-04-17 14:01:19.987 - INFO: Training Metrics: {"loss_text_ce": 2.760371208190918, "loss_mel_ce": 1.0772740840911865, "loss_gpt_total": 1.1048777103424072, "lr": 3.90625e-08, "it": 2529, "step": 9, "steps": 10, "epoch": 248, "iteration_rate": 5.128811836242676} 23-04-17 14:01:25.711 - INFO: Training Metrics: {"loss_text_ce": 2.7596633434295654, "loss_mel_ce": 1.077347755432129, "loss_gpt_total": 1.1049444675445557, "lr": 3.90625e-08, "it": 2530, "step": 10, "steps": 10, "epoch": 248, "iteration_rate": 5.691448450088501} 23-04-17 14:01:31.202 - INFO: Training Metrics: {"loss_text_ce": 2.7545931339263916, "loss_mel_ce": 1.07760488986969, "loss_gpt_total": 1.1051509380340576, "lr": 3.90625e-08, "it": 2531, "step": 1, "steps": 10, "epoch": 249, "iteration_rate": 5.268348932266235} 23-04-17 14:01:36.682 - INFO: Training Metrics: {"loss_text_ce": 2.754469156265259, "loss_mel_ce": 1.0777028799057007, "loss_gpt_total": 1.1052477359771729, "lr": 3.90625e-08, "it": 2532, "step": 2, "steps": 10, "epoch": 249, "iteration_rate": 5.44597053527832} 23-04-17 14:01:41.622 - INFO: Training Metrics: {"loss_text_ce": 2.7564830780029297, "loss_mel_ce": 1.0775014162063599, "loss_gpt_total": 1.1050664186477661, "lr": 3.90625e-08, "it": 2533, "step": 3, "steps": 10, "epoch": 249, "iteration_rate": 4.907240629196167} 23-04-17 14:01:46.917 - INFO: Training Metrics: {"loss_text_ce": 2.758206367492676, "loss_mel_ce": 1.0777086019515991, "loss_gpt_total": 1.1052907705307007, "lr": 3.90625e-08, "it": 2534, "step": 4, "steps": 10, "epoch": 249, "iteration_rate": 5.262513160705566} 23-04-17 14:01:52.021 - INFO: Training Metrics: {"loss_text_ce": 2.759000301361084, "loss_mel_ce": 
1.0776197910308838, "loss_gpt_total": 1.1052097082138062, "lr": 3.90625e-08, "it": 2535, "step": 5, "steps": 10, "epoch": 249, "iteration_rate": 5.068845748901367} 23-04-17 14:01:57.282 - INFO: Training Metrics: {"loss_text_ce": 2.7594616413116455, "loss_mel_ce": 1.0775761604309082, "loss_gpt_total": 1.1051708459854126, "lr": 3.90625e-08, "it": 2536, "step": 6, "steps": 10, "epoch": 249, "iteration_rate": 5.223066091537476} 23-04-17 14:02:02.668 - INFO: Training Metrics: {"loss_text_ce": 2.759352207183838, "loss_mel_ce": 1.0775877237319946, "loss_gpt_total": 1.105181336402893, "lr": 3.90625e-08, "it": 2537, "step": 7, "steps": 10, "epoch": 249, "iteration_rate": 5.351496458053589} 23-04-17 14:02:07.854 - INFO: Training Metrics: {"loss_text_ce": 2.7588579654693604, "loss_mel_ce": 1.0776170492172241, "loss_gpt_total": 1.105205774307251, "lr": 3.90625e-08, "it": 2538, "step": 8, "steps": 10, "epoch": 249, "iteration_rate": 5.153621196746826} 23-04-17 14:02:12.946 - INFO: Training Metrics: {"loss_text_ce": 2.758111000061035, "loss_mel_ce": 1.0776387453079224, "loss_gpt_total": 1.1052199602127075, "lr": 3.90625e-08, "it": 2539, "step": 9, "steps": 10, "epoch": 249, "iteration_rate": 5.059805870056152} 23-04-17 14:02:18.370 - INFO: Training Metrics: {"loss_text_ce": 2.7571511268615723, "loss_mel_ce": 1.0775513648986816, "loss_gpt_total": 1.1051229238510132, "lr": 3.90625e-08, "it": 2540, "step": 10, "steps": 10, "epoch": 249, "iteration_rate": 5.39172101020813} 23-04-17 14:02:23.896 - INFO: Training Metrics: {"loss_text_ce": 2.7445075511932373, "loss_mel_ce": 1.077515959739685, "loss_gpt_total": 1.1049610376358032, "lr": 3.90625e-08, "it": 2541, "step": 1, "steps": 10, "epoch": 250, "iteration_rate": 5.328975439071655} 23-04-17 14:02:29.137 - INFO: Training Metrics: {"loss_text_ce": 2.745494842529297, "loss_mel_ce": 1.078084945678711, "loss_gpt_total": 1.1055399179458618, "lr": 3.90625e-08, "it": 2542, "step": 2, "steps": 10, "epoch": 250, "iteration_rate": 5.205178499221802} 23-04-17 14:02:34.449 - INFO: Training Metrics: {"loss_text_ce": 2.7451517581939697, "loss_mel_ce": 1.07760488986969, "loss_gpt_total": 1.1050565242767334, "lr": 3.90625e-08, "it": 2543, "step": 3, "steps": 10, "epoch": 250, "iteration_rate": 5.279070615768433} 23-04-17 14:02:39.814 - INFO: Training Metrics: {"loss_text_ce": 2.745204448699951, "loss_mel_ce": 1.077518105506897, "loss_gpt_total": 1.1049702167510986, "lr": 3.90625e-08, "it": 2544, "step": 4, "steps": 10, "epoch": 250, "iteration_rate": 5.332399606704712} 23-04-17 14:02:44.912 - INFO: Training Metrics: {"loss_text_ce": 2.7460873126983643, "loss_mel_ce": 1.0772807598114014, "loss_gpt_total": 1.1047416925430298, "lr": 3.90625e-08, "it": 2545, "step": 5, "steps": 10, "epoch": 250, "iteration_rate": 5.062662363052368} 23-04-17 14:02:50.150 - INFO: Training Metrics: {"loss_text_ce": 2.7456247806549072, "loss_mel_ce": 1.0770293474197388, "loss_gpt_total": 1.1044856309890747, "lr": 3.90625e-08, "it": 2546, "step": 6, "steps": 10, "epoch": 250, "iteration_rate": 5.2061543464660645} 23-04-17 14:02:55.630 - INFO: Training Metrics: {"loss_text_ce": 2.745784282684326, "loss_mel_ce": 1.0768543481826782, "loss_gpt_total": 1.1043121814727783, "lr": 3.90625e-08, "it": 2547, "step": 7, "steps": 10, "epoch": 250, "iteration_rate": 5.442171812057495} 23-04-17 14:03:01.256 - INFO: Training Metrics: {"loss_text_ce": 2.7461302280426025, "loss_mel_ce": 1.0766364336013794, "loss_gpt_total": 1.104097843170166, "lr": 3.90625e-08, "it": 2548, "step": 8, "steps": 10, "epoch": 250, 
"iteration_rate": 5.591397047042847} 23-04-17 14:03:06.674 - INFO: Training Metrics: {"loss_text_ce": 2.7463810443878174, "loss_mel_ce": 1.076277494430542, "loss_gpt_total": 1.1037414073944092, "lr": 3.90625e-08, "it": 2549, "step": 9, "steps": 10, "epoch": 250, "iteration_rate": 5.382100820541382} 23-04-17 14:03:12.122 - INFO: Training Metrics: {"loss_text_ce": 2.746581554412842, "loss_mel_ce": 1.0761140584945679, "loss_gpt_total": 1.1035799980163574, "lr": 3.90625e-08, "it": 2550, "step": 10, "steps": 10, "epoch": 250, "iteration_rate": 5.415835618972778} 23-04-17 14:03:17.034 - INFO: Saving models and training states. 23-04-17 14:03:17.039 - INFO: Finished training!