{
  "best_metric": 0.023628102615475655,
  "best_model_checkpoint": "./mixstral/01-04-24-Weni-WeniGPT-QA-Zephyr-7B-5.0.1-KTO_WeniGPT Experiment using KTO trainer with no collator, Mixstral model and no system prompt.-2_max_steps-262_batch_32_2024-04-01_ppid_2059/checkpoint-100",
  "epoch": 0.7582938388625592,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15,
      "grad_norm": 1.2250428199768066,
      "learning_rate": 0.00019133858267716535,
      "loss": 0.3931,
      "step": 20,
      "train/kl": 1.2274655103683472,
      "train/logps/chosen": -192.38479614257812,
      "train/logps/rejected": -224.69888305664062,
      "train/rewards/chosen": -0.14804823696613312,
      "train/rewards/margins": 1.4835635870695114,
      "train/rewards/rejected": -1.6316118240356445
    },
    {
      "epoch": 0.3,
      "grad_norm": 2.3648412227630615,
      "learning_rate": 0.00017559055118110238,
      "loss": 0.1177,
      "step": 40,
      "train/kl": 0.9526737332344055,
      "train/logps/chosen": -162.2818603515625,
      "train/logps/rejected": -265.06201171875,
      "train/rewards/chosen": 3.2411201000213623,
      "train/rewards/margins": 10.128443956375122,
      "train/rewards/rejected": -6.88732385635376
    },
    {
      "epoch": 0.38,
      "eval/kl": 0.0,
      "eval/logps/chosen": -149.90289306640625,
      "eval/logps/rejected": -403.8857727050781,
      "eval/rewards/chosen": 4.500288486480713,
      "eval/rewards/margins": 24.8637433052063,
      "eval/rewards/rejected": -20.363454818725586,
      "eval_loss": 0.04682931676506996,
      "eval_runtime": 1026.2439,
      "eval_samples_per_second": 0.476,
      "eval_steps_per_second": 0.119,
      "step": 50
    },
    {
      "epoch": 0.45,
      "grad_norm": 1.3626991510391235,
      "learning_rate": 0.00015984251968503938,
      "loss": 0.0493,
      "step": 60,
      "train/kl": 0.10373685508966446,
      "train/logps/chosen": -142.3507080078125,
      "train/logps/rejected": -377.33282470703125,
      "train/rewards/chosen": 5.036634922027588,
      "train/rewards/margins": 22.215031147003174,
      "train/rewards/rejected": -17.178396224975586
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.18376566469669342,
      "learning_rate": 0.0001440944881889764,
      "loss": 0.0223,
      "step": 80,
      "train/kl": 0.26756417751312256,
      "train/logps/chosen": -134.23001098632812,
      "train/logps/rejected": -367.7132263183594,
      "train/rewards/chosen": 5.875685691833496,
      "train/rewards/margins": 24.18467617034912,
      "train/rewards/rejected": -18.308990478515625
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.5995637774467468,
      "learning_rate": 0.00012834645669291338,
      "loss": 0.0257,
      "step": 100,
      "train/kl": 0.0,
      "train/logps/chosen": -145.93228149414062,
      "train/logps/rejected": -409.9689025878906,
      "train/rewards/chosen": 6.230437755584717,
      "train/rewards/margins": 27.460736751556396,
      "train/rewards/rejected": -21.23029899597168
    },
    {
      "epoch": 0.76,
      "eval/kl": 0.2827310264110565,
      "eval/logps/chosen": -131.14064025878906,
      "eval/logps/rejected": -441.502197265625,
      "eval/rewards/chosen": 6.376513957977295,
      "eval/rewards/margins": 30.501620769500732,
      "eval/rewards/rejected": -24.125106811523438,
      "eval_loss": 0.023628102615475655,
      "eval_runtime": 1026.1735,
      "eval_samples_per_second": 0.476,
      "eval_steps_per_second": 0.119,
      "step": 100
    }
  ],
  "logging_steps": 20,
  "max_steps": 262,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}