{
  "best_metric": 1.88290536403656,
  "best_model_checkpoint": "../logs/03-08-2024T09-55-11-detailclip-b32/checkpoint-58",
  "epoch": 0.5043478260869565,
  "eval_steps": 58,
  "global_step": 58,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05217391304347826,
      "grad_norm": 2.7094666957855225,
      "learning_rate": 1.2e-05,
      "loss": 0.7099,
      "step": 6
    },
    {
      "epoch": 0.10434782608695652,
      "grad_norm": 2.6887893676757812,
      "learning_rate": 2.4e-05,
      "loss": 0.6948,
      "step": 12
    },
    {
      "epoch": 0.1565217391304348,
      "grad_norm": 2.56785249710083,
      "learning_rate": 3.6e-05,
      "loss": 0.7276,
      "step": 18
    },
    {
      "epoch": 0.20869565217391303,
      "grad_norm": 2.413269281387329,
      "learning_rate": 4.8e-05,
      "loss": 0.5937,
      "step": 24
    },
    {
      "epoch": 0.2608695652173913,
      "grad_norm": 2.0822973251342773,
      "learning_rate": 6e-05,
      "loss": 0.4702,
      "step": 30
    },
    {
      "epoch": 0.3130434782608696,
      "grad_norm": 1.9383469820022583,
      "learning_rate": 7.2e-05,
      "loss": 0.4579,
      "step": 36
    },
    {
      "epoch": 0.3652173913043478,
      "grad_norm": 1.6925252676010132,
      "learning_rate": 8.4e-05,
      "loss": 0.4028,
      "step": 42
    },
    {
      "epoch": 0.41739130434782606,
      "grad_norm": 1.5693070888519287,
      "learning_rate": 9.6e-05,
      "loss": 0.3466,
      "step": 48
    },
    {
      "epoch": 0.46956521739130436,
      "grad_norm": 1.8624712228775024,
      "learning_rate": 9.999673735634258e-05,
      "loss": 0.31,
      "step": 54
    },
    {
      "epoch": 0.5043478260869565,
      "eval_loss": 1.88290536403656,
      "eval_runtime": 371.0087,
      "eval_samples_per_second": 6.021,
      "eval_steps_per_second": 0.049,
      "step": 58
    },
    {
      "epoch": 0.5043478260869565,
      "neg_increments": 0.40031376906933136,
      "positive_increments": 0.6000771337979885,
      "rate_increments": 0.60599292530573,
      "step": 58
    }
  ],
  "logging_steps": 6,
  "max_steps": 1150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 58,
  "total_flos": 1318444128207360.0,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": null
}