QAmembert / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9486271036315324,
"global_step": 11000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09,
"learning_rate": 7.374631268436578e-05,
"loss": 3.876,
"step": 500
},
{
"epoch": 0.18,
"learning_rate": 9.696569920844328e-05,
"loss": 1.5925,
"step": 1000
},
{
"epoch": 0.27,
"learning_rate": 9.2254052016585e-05,
"loss": 1.2572,
"step": 1500
},
{
"epoch": 0.35,
"learning_rate": 8.754240482472673e-05,
"loss": 1.1371,
"step": 2000
},
{
"epoch": 0.44,
"learning_rate": 8.283075763286845e-05,
"loss": 1.0932,
"step": 2500
},
{
"epoch": 0.53,
"learning_rate": 7.811911044101018e-05,
"loss": 1.0323,
"step": 3000
},
{
"epoch": 0.62,
"learning_rate": 7.340746324915191e-05,
"loss": 1.0333,
"step": 3500
},
{
"epoch": 0.71,
"learning_rate": 6.869581605729364e-05,
"loss": 0.9684,
"step": 4000
},
{
"epoch": 0.8,
"learning_rate": 6.398416886543535e-05,
"loss": 0.9606,
"step": 4500
},
{
"epoch": 0.89,
"learning_rate": 5.927252167357709e-05,
"loss": 0.9397,
"step": 5000
},
{
"epoch": 0.97,
"learning_rate": 5.4560874481718815e-05,
"loss": 0.8899,
"step": 5500
},
{
"epoch": 1.0,
"eval_loss": 1.0070745944976807,
"eval_runtime": 18.4896,
"eval_samples_per_second": 176.37,
"eval_steps_per_second": 7.355,
"step": 5645
},
{
"epoch": 1.06,
"learning_rate": 4.984922728986054e-05,
"loss": 0.733,
"step": 6000
},
{
"epoch": 1.15,
"learning_rate": 4.5137580098002266e-05,
"loss": 0.6544,
"step": 6500
},
{
"epoch": 1.24,
"learning_rate": 4.0425932906143985e-05,
"loss": 0.6608,
"step": 7000
},
{
"epoch": 1.33,
"learning_rate": 3.571428571428572e-05,
"loss": 0.6493,
"step": 7500
},
{
"epoch": 1.42,
"learning_rate": 3.100263852242744e-05,
"loss": 0.6352,
"step": 8000
},
{
"epoch": 1.51,
"learning_rate": 2.629099133056917e-05,
"loss": 0.6268,
"step": 8500
},
{
"epoch": 1.59,
"learning_rate": 2.1579344138710894e-05,
"loss": 0.6172,
"step": 9000
},
{
"epoch": 1.68,
"learning_rate": 1.686769694685262e-05,
"loss": 0.6159,
"step": 9500
},
{
"epoch": 1.77,
"learning_rate": 1.2156049754994346e-05,
"loss": 0.5908,
"step": 10000
},
{
"epoch": 1.86,
"learning_rate": 7.444402563136073e-06,
"loss": 0.59,
"step": 10500
},
{
"epoch": 1.95,
"learning_rate": 2.7327553712777988e-06,
"loss": 0.582,
"step": 11000
}
],
"max_steps": 11290,
"num_train_epochs": 2,
"total_flos": 6.897685654641254e+16,
"trial_name": null,
"trial_params": null
}