machine-translation/llama-factory/saves/Mistral-7B-v0.3-Chinese-Chat/checkpoint-70/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.99288256227758,
  "eval_steps": 35,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1423487544483986,
      "grad_norm": 5.176560878753662,
      "learning_rate": 2.380952380952381e-05,
      "loss": 2.7275,
      "step": 5
    },
    {
      "epoch": 0.2846975088967972,
      "grad_norm": 3.7604565620422363,
      "learning_rate": 4.761904761904762e-05,
      "loss": 2.3848,
      "step": 10
    },
    {
      "epoch": 0.42704626334519574,
      "grad_norm": 1.9476191997528076,
      "learning_rate": 7.142857142857143e-05,
      "loss": 1.9534,
      "step": 15
    },
    {
      "epoch": 0.5693950177935944,
      "grad_norm": 1.1864774227142334,
      "learning_rate": 9.523809523809524e-05,
      "loss": 1.7776,
      "step": 20
    },
    {
      "epoch": 0.7117437722419929,
      "grad_norm": 1.2619398832321167,
      "learning_rate": 9.988952191691925e-05,
      "loss": 1.6551,
      "step": 25
    },
    {
      "epoch": 0.8540925266903915,
      "grad_norm": 1.204907774925232,
      "learning_rate": 9.944154131125642e-05,
      "loss": 1.6106,
      "step": 30
    },
    {
      "epoch": 0.99644128113879,
      "grad_norm": 1.030988097190857,
      "learning_rate": 9.865224352899119e-05,
      "loss": 1.6196,
      "step": 35
    },
    {
      "epoch": 0.99644128113879,
      "eval_loss": 1.531648874282837,
      "eval_runtime": 3.1223,
      "eval_samples_per_second": 14.733,
      "eval_steps_per_second": 14.733,
      "step": 35
    },
    {
      "epoch": 1.1387900355871885,
      "grad_norm": 1.0289065837860107,
      "learning_rate": 9.752707744739145e-05,
      "loss": 1.4904,
      "step": 40
    },
    {
      "epoch": 1.281138790035587,
      "grad_norm": 1.2606865167617798,
      "learning_rate": 9.607381059352038e-05,
      "loss": 1.4244,
      "step": 45
    },
    {
      "epoch": 1.4234875444839858,
      "grad_norm": 1.2899017333984375,
      "learning_rate": 9.430247552150673e-05,
      "loss": 1.4003,
      "step": 50
    },
    {
      "epoch": 1.5658362989323842,
      "grad_norm": 1.3828767538070679,
      "learning_rate": 9.22253005533154e-05,
      "loss": 1.3709,
      "step": 55
    },
    {
      "epoch": 1.708185053380783,
      "grad_norm": 1.5857073068618774,
      "learning_rate": 8.985662536114613e-05,
      "loss": 1.3789,
      "step": 60
    },
    {
      "epoch": 1.8505338078291815,
      "grad_norm": 1.4692878723144531,
      "learning_rate": 8.721280197423258e-05,
      "loss": 1.3579,
      "step": 65
    },
    {
      "epoch": 1.99288256227758,
      "grad_norm": 1.452291488647461,
      "learning_rate": 8.43120818934367e-05,
      "loss": 1.3654,
      "step": 70
    },
    {
      "epoch": 1.99288256227758,
      "eval_loss": 1.4582531452178955,
      "eval_runtime": 3.0596,
      "eval_samples_per_second": 15.035,
      "eval_steps_per_second": 15.035,
      "step": 70
    }
  ],
  "logging_steps": 5,
  "max_steps": 210,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 35,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.920445563260109e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
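
A minimal sketch of how this trainer_state.json could be inspected, assuming only the Python standard library and a local copy of the checkpoint at the path shown above (the path constant is an assumption; adjust it to wherever the file actually lives):

import json

# Assumed local path to the checkpoint's trainer state; change as needed.
STATE_PATH = "saves/Mistral-7B-v0.3-Chinese-Chat/checkpoint-70/trainer_state.json"

with open(STATE_PATH, encoding="utf-8") as f:
    state = json.load(f)

# log_history mixes training-loss entries (with "loss") and eval entries (with "eval_loss").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f"step {e['step']:>3}  epoch {e['epoch']:.2f}  loss {e['loss']:.4f}")
for e in eval_logs:
    print(f"step {e['step']:>3}  epoch {e['epoch']:.2f}  eval_loss {e['eval_loss']:.4f}")

For this checkpoint the script would list the training loss falling from 2.7275 at step 5 to 1.3654 at step 70, with eval_loss logged every 35 steps (1.5316 at step 35, 1.4583 at step 70), matching the eval_steps and save_steps values recorded in the file.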