wzhouad committed
Commit 1315aca
1 Parent(s): b7d4f26

Model save
README.md CHANGED
@@ -13,10 +13,19 @@ model-index:
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/sanqiang/wdpo/runs/1co2gbm0)
 # zephyr-7b-dpo-full
 
 This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the None dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.0544
+- Rewards/chosen: -2.1973
+- Rewards/rejected: -2.9243
+- Rewards/accuracies: 0.7070
+- Rewards/margins: 0.7270
+- Logps/rejected: -549.7877
+- Logps/chosen: -476.7722
+- Logits/rejected: -1.9407
+- Logits/chosen: -1.9849
 
 ## Model description
 
@@ -38,11 +47,11 @@ The following hyperparameters were used during training:
 - learning_rate: 5e-07
 - train_batch_size: 8
 - eval_batch_size: 8
-- seed: 42
+- seed: 3
 - distributed_type: multi-GPU
 - num_devices: 8
-- gradient_accumulation_steps: 2
-- total_train_batch_size: 128
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 256
 - total_eval_batch_size: 64
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
@@ -51,11 +60,17 @@ The following hyperparameters were used during training:
 
 ### Training results
 
+| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+|:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+| 0.0846 | 0.23 | 100 | 0.0846 | -1.4642 | -1.8940 | 0.6484 | 0.4298 | -446.7535 | -403.4620 | -2.3302 | -2.3522 |
+| 0.0477 | 0.45 | 200 | 0.0672 | -1.7958 | -2.4017 | 0.7148 | 0.6059 | -497.5217 | -436.6205 | -2.1284 | -2.1617 |
+| 0.046 | 0.68 | 300 | 0.0552 | -2.1484 | -2.8722 | 0.7148 | 0.7238 | -544.5698 | -471.8781 | -1.9484 | -1.9914 |
+| 0.0439 | 0.91 | 400 | 0.0544 | -2.1973 | -2.9243 | 0.7070 | 0.7270 | -549.7877 | -476.7722 | -1.9407 | -1.9849 |
 
 
 ### Framework versions
 
-- Transformers 4.41.0.dev0
+- Transformers 4.35.2
 - Pytorch 2.1.2+cu121
 - Datasets 2.14.6
-- Tokenizers 0.19.1
+- Tokenizers 0.14.1
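The updated hyperparameters are internally consistent: 8 samples per device × 8 GPUs × 4 gradient-accumulation steps gives the listed total_train_batch_size of 256. Below is a minimal sketch of how these values could be expressed as `transformers.TrainingArguments`; the commit does not include the training script, so the `output_dir` and anything not listed in the card are assumptions.

```python
# Illustrative only: maps the hyperparameters from the card above onto
# transformers.TrainingArguments. Values not listed in the card (e.g. output_dir)
# are placeholders, not taken from this commit.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="zephyr-7b-dpo-full",   # hypothetical output path
    learning_rate=5e-07,
    per_device_train_batch_size=8,     # "train_batch_size: 8" in the card
    per_device_eval_batch_size=8,      # "eval_batch_size: 8"
    gradient_accumulation_steps=4,
    seed=3,
    lr_scheduler_type="cosine",
    num_train_epochs=1,                # the results files below report epoch 1.0
    # Adam betas=(0.9,0.999) and epsilon=1e-08 match the TrainingArguments defaults.
)

# Effective train batch size: 8 per device x 8 devices x 4 accumulation steps.
print(8 * 8 * 4)  # 256, matching "total_train_batch_size: 256"
```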
all_results.json CHANGED
@@ -1,9 +1,8 @@
 {
     "epoch": 1.0,
-    "total_flos": 0.0,
-    "train_loss": 0.20106691993632406,
-    "train_runtime": 384.3035,
-    "train_samples": 6750,
-    "train_samples_per_second": 17.564,
-    "train_steps_per_second": 0.138
+    "train_loss": 0.07998780592802972,
+    "train_runtime": 7378.9712,
+    "train_samples": 113028,
+    "train_samples_per_second": 15.318,
+    "train_steps_per_second": 0.06
 }
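As a quick cross-check, the new numbers hang together: 113028 training samples at an effective batch size of 256 is roughly 441 optimizer steps (the global_step reported in trainer_state.json below), and 113028 samples over 7378.9712 s is about 15.3 samples per second, matching train_samples_per_second. A small sketch, assuming a local copy of the file:

```python
# Sanity-check the figures in all_results.json (the effective batch size of 256
# comes from the README above; the file path is assumed).
import json

with open("all_results.json") as f:
    results = json.load(f)

print(results["train_samples"] / 256)                        # ~441.5 -> 441 optimizer steps
print(results["train_samples"] / results["train_runtime"])   # ~15.3 samples/s
```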
generation_config.json CHANGED
@@ -2,5 +2,5 @@
   "_from_model_config": true,
   "bos_token_id": 1,
   "eos_token_id": 2,
-  "transformers_version": "4.41.0.dev0"
+  "transformers_version": "4.35.2"
 }
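Only the recorded transformers version changes here; the token ids stay the same. A minimal sketch for reading the config with transformers, assuming the repo id is `wzhouad/zephyr-7b-dpo-full` (inferred from the commit author and model name):

```python
# Load this repo's generation_config.json; the repo id is an assumption.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("wzhouad/zephyr-7b-dpo-full")
print(gen_config.bos_token_id, gen_config.eos_token_id)  # 1 2, per the file above
```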
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b8447ede9197c1eeacfbf762bd1dd37ac36cbe1253ff50146187fe9603ad0090
+oid sha256:811536fd507edc48842921f5fe58009c4d68ba8670577fea3ab063a8b487349b
 size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3d86c29256deb0c7545e2b1962d531ec89a1631e798e8bb93621523bdbd4aee4
+oid sha256:fd5a311a089f3ba60ceb62659015c6d5a3aad05b534064201ca1668bddfcc05d
 size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c323f2aa09952bda16630d8515e40ec79723cad60bb91f6995b1fff7896b7e13
+oid sha256:629550c668df231f1f9b855ba38797fad39fafa90ac09f07dda826c88b931c9a
 size 4540516344
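The three .safetensors entries are git-LFS pointer files: `oid sha256:` is the digest of the actual weight shard and `size` is its byte count, so the shard sizes are unchanged and only the weights differ. A small sketch for checking a downloaded shard against its pointer:

```python
# Verify a locally downloaded shard against the sha256 recorded in its LFS pointer.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# For the version added in this commit, this should print
# 811536fd507edc48842921f5fe58009c4d68ba8670577fea3ab063a8b487349b.
print(sha256_of("model-00001-of-00003.safetensors"))
```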
train_results.json CHANGED
@@ -1,9 +1,8 @@
 {
     "epoch": 1.0,
-    "total_flos": 0.0,
-    "train_loss": 0.20106691993632406,
-    "train_runtime": 384.3035,
-    "train_samples": 6750,
-    "train_samples_per_second": 17.564,
-    "train_steps_per_second": 0.138
+    "train_loss": 0.07998780592802972,
+    "train_runtime": 7378.9712,
+    "train_samples": 113028,
+    "train_samples_per_second": 15.318,
+    "train_steps_per_second": 0.06
 }
trainer_state.json CHANGED
@@ -1,25 +1,21 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 1.0,
5
  "eval_steps": 100,
6
- "global_step": 53,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "debug/losses": 0.17192834615707397,
13
- "debug/policy_weights": 0.24804016947746277,
14
- "debug/raw_losses": 0.6931471824645996,
15
- "epoch": 0.018867924528301886,
16
- "grad_norm": 3.078622153428569,
17
- "learning_rate": 8.333333333333333e-08,
18
- "logits/chosen": -2.855412006378174,
19
- "logits/rejected": -2.8797199726104736,
20
- "logps/chosen": -320.43853759765625,
21
- "logps/rejected": -340.07073974609375,
22
- "loss": 0.2116,
23
  "rewards/accuracies": 0.0,
24
  "rewards/chosen": 0.0,
25
  "rewards/margins": 0.0,
@@ -27,124 +23,700 @@
27
  "step": 1
28
  },
29
  {
30
- "debug/losses": 0.20365437865257263,
31
- "debug/policy_weights": 0.2948223948478699,
32
- "debug/raw_losses": 0.6906725764274597,
33
- "epoch": 0.18867924528301888,
34
- "grad_norm": 3.2072554177174166,
35
- "learning_rate": 4.911172937635942e-07,
36
- "logits/chosen": -2.8659250736236572,
37
- "logits/rejected": -2.8918118476867676,
38
- "logps/chosen": -305.7447509765625,
39
- "logps/rejected": -332.24249267578125,
40
- "loss": 0.2033,
41
- "rewards/accuracies": 0.5416666865348816,
42
- "rewards/chosen": 0.0007928035338409245,
43
- "rewards/margins": 0.005046091042459011,
44
- "rewards/rejected": -0.004253287799656391,
45
  "step": 10
46
  },
47
  {
48
- "debug/losses": 0.2035388946533203,
49
- "debug/policy_weights": 0.3013075888156891,
50
- "debug/raw_losses": 0.6750057935714722,
51
- "epoch": 0.37735849056603776,
52
- "grad_norm": 3.0690801879183462,
53
- "learning_rate": 3.982949361823388e-07,
54
- "logits/chosen": -2.8558077812194824,
55
- "logits/rejected": -2.85764479637146,
56
- "logps/chosen": -324.26263427734375,
57
- "logps/rejected": -331.02935791015625,
58
- "loss": 0.2069,
59
- "rewards/accuracies": 0.6312500238418579,
60
- "rewards/chosen": 0.0035451077856123447,
61
- "rewards/margins": 0.04073050990700722,
62
- "rewards/rejected": -0.03718540072441101,
63
  "step": 20
64
  },
65
  {
66
- "debug/losses": 0.20855382084846497,
67
- "debug/policy_weights": 0.32070785760879517,
68
- "debug/raw_losses": 0.6490412354469299,
69
- "epoch": 0.5660377358490566,
70
- "grad_norm": 3.2770260980043857,
71
- "learning_rate": 2.416462557480814e-07,
72
- "logits/chosen": -2.8393235206604004,
73
- "logits/rejected": -2.84259033203125,
74
- "logps/chosen": -298.1471252441406,
75
- "logps/rejected": -313.35174560546875,
76
- "loss": 0.2011,
77
- "rewards/accuracies": 0.7437499761581421,
78
- "rewards/chosen": -0.013330144807696342,
79
- "rewards/margins": 0.11589495837688446,
80
- "rewards/rejected": -0.12922510504722595,
81
  "step": 30
82
  },
83
  {
84
- "debug/losses": 0.18307599425315857,
85
- "debug/policy_weights": 0.303046315908432,
86
- "debug/raw_losses": 0.5849612951278687,
87
- "epoch": 0.7547169811320755,
88
- "grad_norm": 2.716351513439798,
89
- "learning_rate": 8.859303711029939e-08,
90
- "logits/chosen": -2.8420355319976807,
91
- "logits/rejected": -2.839024543762207,
92
- "logps/chosen": -290.5810852050781,
93
- "logps/rejected": -320.720458984375,
94
- "loss": 0.1967,
95
- "rewards/accuracies": 0.731249988079071,
96
- "rewards/chosen": -0.010476941242814064,
97
- "rewards/margins": 0.30531758069992065,
98
- "rewards/rejected": -0.31579452753067017,
99
  "step": 40
100
  },
101
  {
102
- "debug/losses": 0.2313450276851654,
103
- "debug/policy_weights": 0.35370975732803345,
104
- "debug/raw_losses": 0.647036612033844,
105
- "epoch": 0.9433962264150944,
106
- "grad_norm": 3.007767493180951,
107
- "learning_rate": 5.009573740853313e-09,
108
- "logits/chosen": -2.8769078254699707,
109
- "logits/rejected": -2.8638930320739746,
110
- "logps/chosen": -282.02764892578125,
111
- "logps/rejected": -316.0559997558594,
112
- "loss": 0.1938,
113
- "rewards/accuracies": 0.668749988079071,
114
- "rewards/chosen": -0.026972616091370583,
115
- "rewards/margins": 0.15525773167610168,
116
- "rewards/rejected": -0.18223035335540771,
117
  "step": 50
118
  },
119
  {
120
  "epoch": 1.0,
121
- "step": 53,
122
  "total_flos": 0.0,
123
- "train_loss": 0.20106691993632406,
124
- "train_runtime": 384.3035,
125
- "train_samples_per_second": 17.564,
126
- "train_steps_per_second": 0.138
127
  }
128
  ],
129
  "logging_steps": 10,
130
- "max_steps": 53,
131
- "num_input_tokens_seen": 0,
132
  "num_train_epochs": 1,
133
  "save_steps": 100,
134
- "stateful_callbacks": {
135
- "TrainerControl": {
136
- "args": {
137
- "should_epoch_stop": false,
138
- "should_evaluate": false,
139
- "should_log": false,
140
- "should_save": false,
141
- "should_training_stop": false
142
- },
143
- "attributes": {}
144
- }
145
- },
146
  "total_flos": 0.0,
147
- "train_batch_size": 8,
148
  "trial_name": null,
149
  "trial_params": null
150
  }
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.99830220713073,
5
  "eval_steps": 100,
6
+ "global_step": 441,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.0,
13
+ "learning_rate": 1.111111111111111e-08,
14
+ "logits/chosen": -2.669281482696533,
15
+ "logits/rejected": -2.675659418106079,
16
+ "logps/chosen": -301.2757873535156,
17
+ "logps/rejected": -280.8008728027344,
18
+ "loss": 0.2803,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
23
  "step": 1
24
  },
25
  {
26
+ "epoch": 0.02,
27
+ "learning_rate": 1.111111111111111e-07,
28
+ "logits/chosen": -2.8107264041900635,
29
+ "logits/rejected": -2.7811262607574463,
30
+ "logps/chosen": -320.0479736328125,
31
+ "logps/rejected": -195.18087768554688,
32
+ "loss": 0.2792,
33
+ "rewards/accuracies": 0.4826388955116272,
34
+ "rewards/chosen": 0.0006166233215481043,
35
+ "rewards/margins": 0.0009485264890827239,
36
+ "rewards/rejected": -0.00033190299291163683,
37
  "step": 10
38
  },
39
  {
40
+ "epoch": 0.05,
41
+ "learning_rate": 2.222222222222222e-07,
42
+ "logits/chosen": -2.8013899326324463,
43
+ "logits/rejected": -2.7626068592071533,
44
+ "logps/chosen": -350.6124572753906,
45
+ "logps/rejected": -191.4945831298828,
46
+ "loss": 0.2774,
47
+ "rewards/accuracies": 0.6625000238418579,
48
+ "rewards/chosen": 0.0069363838993012905,
49
+ "rewards/margins": 0.013156639412045479,
50
+ "rewards/rejected": -0.006220255978405476,
51
  "step": 20
52
  },
53
  {
54
+ "epoch": 0.07,
55
+ "learning_rate": 3.333333333333333e-07,
56
+ "logits/chosen": -2.7069194316864014,
57
+ "logits/rejected": -2.691702365875244,
58
+ "logps/chosen": -316.24737548828125,
59
+ "logps/rejected": -201.17063903808594,
60
+ "loss": 0.2782,
61
+ "rewards/accuracies": 0.6656249761581421,
62
+ "rewards/chosen": 0.0192975215613842,
63
+ "rewards/margins": 0.07039008289575577,
64
+ "rewards/rejected": -0.05109255388379097,
65
  "step": 30
66
  },
67
  {
68
+ "epoch": 0.09,
69
+ "learning_rate": 4.444444444444444e-07,
70
+ "logits/chosen": -2.606764554977417,
71
+ "logits/rejected": -2.5910491943359375,
72
+ "logps/chosen": -376.6419982910156,
73
+ "logps/rejected": -222.21923828125,
74
+ "loss": 0.2589,
75
+ "rewards/accuracies": 0.699999988079071,
76
+ "rewards/chosen": 0.015041938051581383,
77
+ "rewards/margins": 0.2519453465938568,
78
+ "rewards/rejected": -0.23690339922904968,
79
  "step": 40
80
  },
81
  {
82
+ "epoch": 0.11,
83
+ "learning_rate": 4.998033461515242e-07,
84
+ "logits/chosen": -2.5254263877868652,
85
+ "logits/rejected": -2.522778034210205,
86
+ "logps/chosen": -350.6036682128906,
87
+ "logps/rejected": -230.76931762695312,
88
+ "loss": 0.2151,
89
+ "rewards/accuracies": 0.637499988079071,
90
+ "rewards/chosen": -0.16026124358177185,
91
+ "rewards/margins": 0.36047226190567017,
92
+ "rewards/rejected": -0.5207335352897644,
93
  "step": 50
94
  },
95
+ {
96
+ "epoch": 0.14,
97
+ "learning_rate": 4.982319711683221e-07,
98
+ "logits/chosen": -2.509260654449463,
99
+ "logits/rejected": -2.4819066524505615,
100
+ "logps/chosen": -343.71575927734375,
101
+ "logps/rejected": -281.47723388671875,
102
+ "loss": 0.1652,
103
+ "rewards/accuracies": 0.703125,
104
+ "rewards/chosen": -0.3321402072906494,
105
+ "rewards/margins": 0.506214439868927,
106
+ "rewards/rejected": -0.8383547067642212,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.16,
111
+ "learning_rate": 4.950991058546892e-07,
112
+ "logits/chosen": -2.4952120780944824,
113
+ "logits/rejected": -2.4609100818634033,
114
+ "logps/chosen": -381.2798156738281,
115
+ "logps/rejected": -299.72149658203125,
116
+ "loss": 0.1193,
117
+ "rewards/accuracies": 0.7093750238418579,
118
+ "rewards/chosen": -0.518448531627655,
119
+ "rewards/margins": 0.5417992472648621,
120
+ "rewards/rejected": -1.0602478981018066,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.18,
125
+ "learning_rate": 4.904244573372733e-07,
126
+ "logits/chosen": -2.4261202812194824,
127
+ "logits/rejected": -2.4001305103302,
128
+ "logps/chosen": -423.95233154296875,
129
+ "logps/rejected": -343.5025939941406,
130
+ "loss": 0.0926,
131
+ "rewards/accuracies": 0.737500011920929,
132
+ "rewards/chosen": -0.8466840982437134,
133
+ "rewards/margins": 0.7109834551811218,
134
+ "rewards/rejected": -1.55766761302948,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.2,
139
+ "learning_rate": 4.842374312499405e-07,
140
+ "logits/chosen": -2.4205052852630615,
141
+ "logits/rejected": -2.3753650188446045,
142
+ "logps/chosen": -425.88934326171875,
143
+ "logps/rejected": -375.40478515625,
144
+ "loss": 0.0715,
145
+ "rewards/accuracies": 0.731249988079071,
146
+ "rewards/chosen": -1.0327210426330566,
147
+ "rewards/margins": 0.8036619424819946,
148
+ "rewards/rejected": -1.8363832235336304,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.23,
153
+ "learning_rate": 4.7657694675916247e-07,
154
+ "logits/chosen": -2.3601014614105225,
155
+ "logits/rejected": -2.3345751762390137,
156
+ "logps/chosen": -415.061279296875,
157
+ "logps/rejected": -360.3291931152344,
158
+ "loss": 0.0846,
159
+ "rewards/accuracies": 0.721875011920929,
160
+ "rewards/chosen": -0.9150521159172058,
161
+ "rewards/margins": 0.7828376889228821,
162
+ "rewards/rejected": -1.6978899240493774,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.23,
167
+ "eval_logits/chosen": -2.3521652221679688,
168
+ "eval_logits/rejected": -2.330230236053467,
169
+ "eval_logps/chosen": -403.4620361328125,
170
+ "eval_logps/rejected": -446.7535400390625,
171
+ "eval_loss": 0.08460698276758194,
172
+ "eval_rewards/accuracies": 0.6484375,
173
+ "eval_rewards/chosen": -1.4642237424850464,
174
+ "eval_rewards/margins": 0.42977917194366455,
175
+ "eval_rewards/rejected": -1.894002914428711,
176
+ "eval_runtime": 53.5062,
177
+ "eval_samples_per_second": 37.379,
178
+ "eval_steps_per_second": 0.598,
179
+ "step": 100
180
+ },
181
+ {
182
+ "epoch": 0.25,
183
+ "learning_rate": 4.6749119174501973e-07,
184
+ "logits/chosen": -2.338693857192993,
185
+ "logits/rejected": -2.274574041366577,
186
+ "logps/chosen": -452.51953125,
187
+ "logps/rejected": -401.04058837890625,
188
+ "loss": 0.0679,
189
+ "rewards/accuracies": 0.7437499761581421,
190
+ "rewards/chosen": -1.2087438106536865,
191
+ "rewards/margins": 0.9248006939888,
192
+ "rewards/rejected": -2.133544445037842,
193
+ "step": 110
194
+ },
195
+ {
196
+ "epoch": 0.27,
197
+ "learning_rate": 4.5703731967784265e-07,
198
+ "logits/chosen": -2.2874975204467773,
199
+ "logits/rejected": -2.2298505306243896,
200
+ "logps/chosen": -436.90283203125,
201
+ "logps/rejected": -372.56170654296875,
202
+ "loss": 0.0738,
203
+ "rewards/accuracies": 0.7718750238418579,
204
+ "rewards/chosen": -0.8776865005493164,
205
+ "rewards/margins": 0.9758397340774536,
206
+ "rewards/rejected": -1.8535263538360596,
207
+ "step": 120
208
+ },
209
+ {
210
+ "epoch": 0.29,
211
+ "learning_rate": 4.4528109009727333e-07,
212
+ "logits/chosen": -2.297295093536377,
213
+ "logits/rejected": -2.2529804706573486,
214
+ "logps/chosen": -418.8360290527344,
215
+ "logps/rejected": -369.44451904296875,
216
+ "loss": 0.084,
217
+ "rewards/accuracies": 0.753125011920929,
218
+ "rewards/chosen": -0.8223110437393188,
219
+ "rewards/margins": 0.993333637714386,
220
+ "rewards/rejected": -1.8156448602676392,
221
+ "step": 130
222
+ },
223
+ {
224
+ "epoch": 0.32,
225
+ "learning_rate": 4.3229645495529427e-07,
226
+ "logits/chosen": -2.30572509765625,
227
+ "logits/rejected": -2.214725971221924,
228
+ "logps/chosen": -461.4957580566406,
229
+ "logps/rejected": -416.990234375,
230
+ "loss": 0.066,
231
+ "rewards/accuracies": 0.7562500238418579,
232
+ "rewards/chosen": -1.087278962135315,
233
+ "rewards/margins": 1.041991949081421,
234
+ "rewards/rejected": -2.1292710304260254,
235
+ "step": 140
236
+ },
237
+ {
238
+ "epoch": 0.34,
239
+ "learning_rate": 4.1816509342531317e-07,
240
+ "logits/chosen": -2.2726972103118896,
241
+ "logits/rejected": -2.196261405944824,
242
+ "logps/chosen": -419.60479736328125,
243
+ "logps/rejected": -361.95831298828125,
244
+ "loss": 0.0853,
245
+ "rewards/accuracies": 0.78125,
246
+ "rewards/chosen": -0.6499409675598145,
247
+ "rewards/margins": 1.046847939491272,
248
+ "rewards/rejected": -1.696789026260376,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 0.36,
253
+ "learning_rate": 4.0297589810356166e-07,
254
+ "logits/chosen": -2.188967227935791,
255
+ "logits/rejected": -2.119161605834961,
256
+ "logps/chosen": -450.3418884277344,
257
+ "logps/rejected": -422.260986328125,
258
+ "loss": 0.0621,
259
+ "rewards/accuracies": 0.734375,
260
+ "rewards/chosen": -1.2301867008209229,
261
+ "rewards/margins": 1.049591064453125,
262
+ "rewards/rejected": -2.279777765274048,
263
+ "step": 160
264
+ },
265
+ {
266
+ "epoch": 0.38,
267
+ "learning_rate": 3.868244158348331e-07,
268
+ "logits/chosen": -2.135490894317627,
269
+ "logits/rejected": -2.0569894313812256,
270
+ "logps/chosen": -508.18511962890625,
271
+ "logps/rejected": -465.81689453125,
272
+ "loss": 0.0461,
273
+ "rewards/accuracies": 0.746874988079071,
274
+ "rewards/chosen": -1.621382474899292,
275
+ "rewards/margins": 1.0623977184295654,
276
+ "rewards/rejected": -2.6837801933288574,
277
+ "step": 170
278
+ },
279
+ {
280
+ "epoch": 0.41,
281
+ "learning_rate": 3.698122466800142e-07,
282
+ "logits/chosen": -2.1748709678649902,
283
+ "logits/rejected": -2.0735726356506348,
284
+ "logps/chosen": -480.3050842285156,
285
+ "logps/rejected": -428.5189514160156,
286
+ "loss": 0.0508,
287
+ "rewards/accuracies": 0.7093750238418579,
288
+ "rewards/chosen": -1.518842101097107,
289
+ "rewards/margins": 0.8754765391349792,
290
+ "rewards/rejected": -2.3943190574645996,
291
+ "step": 180
292
+ },
293
+ {
294
+ "epoch": 0.43,
295
+ "learning_rate": 3.5204640480617574e-07,
296
+ "logits/chosen": -2.104471206665039,
297
+ "logits/rejected": -2.028428077697754,
298
+ "logps/chosen": -493.2584533691406,
299
+ "logps/rejected": -451.6133728027344,
300
+ "loss": 0.0534,
301
+ "rewards/accuracies": 0.6968749761581421,
302
+ "rewards/chosen": -1.6701205968856812,
303
+ "rewards/margins": 0.9617946743965149,
304
+ "rewards/rejected": -2.63191556930542,
305
+ "step": 190
306
+ },
307
+ {
308
+ "epoch": 0.45,
309
+ "learning_rate": 3.336386453195088e-07,
310
+ "logits/chosen": -2.155747890472412,
311
+ "logits/rejected": -2.0854601860046387,
312
+ "logps/chosen": -486.59954833984375,
313
+ "logps/rejected": -467.2601623535156,
314
+ "loss": 0.0477,
315
+ "rewards/accuracies": 0.7437499761581421,
316
+ "rewards/chosen": -1.5419719219207764,
317
+ "rewards/margins": 1.163585901260376,
318
+ "rewards/rejected": -2.7055578231811523,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.45,
323
+ "eval_logits/chosen": -2.1616616249084473,
324
+ "eval_logits/rejected": -2.128385305404663,
325
+ "eval_logps/chosen": -436.6204833984375,
326
+ "eval_logps/rejected": -497.52166748046875,
327
+ "eval_loss": 0.06721889227628708,
328
+ "eval_rewards/accuracies": 0.71484375,
329
+ "eval_rewards/chosen": -1.7958087921142578,
330
+ "eval_rewards/margins": 0.6058750152587891,
331
+ "eval_rewards/rejected": -2.401683807373047,
332
+ "eval_runtime": 53.3708,
333
+ "eval_samples_per_second": 37.474,
334
+ "eval_steps_per_second": 0.6,
335
+ "step": 200
336
+ },
337
+ {
338
+ "epoch": 0.48,
339
+ "learning_rate": 3.147047612756302e-07,
340
+ "logits/chosen": -2.1386637687683105,
341
+ "logits/rejected": -2.041881561279297,
342
+ "logps/chosen": -493.37530517578125,
343
+ "logps/rejected": -443.93377685546875,
344
+ "loss": 0.0514,
345
+ "rewards/accuracies": 0.765625,
346
+ "rewards/chosen": -1.43257737159729,
347
+ "rewards/margins": 1.1513398885726929,
348
+ "rewards/rejected": -2.5839171409606934,
349
+ "step": 210
350
+ },
351
+ {
352
+ "epoch": 0.5,
353
+ "learning_rate": 2.9536385528937565e-07,
354
+ "logits/chosen": -2.1365461349487305,
355
+ "logits/rejected": -2.0607683658599854,
356
+ "logps/chosen": -505.8017578125,
357
+ "logps/rejected": -459.7229919433594,
358
+ "loss": 0.0508,
359
+ "rewards/accuracies": 0.7718750238418579,
360
+ "rewards/chosen": -1.424095869064331,
361
+ "rewards/margins": 1.3351457118988037,
362
+ "rewards/rejected": -2.7592415809631348,
363
+ "step": 220
364
+ },
365
+ {
366
+ "epoch": 0.52,
367
+ "learning_rate": 2.7573759032598365e-07,
368
+ "logits/chosen": -2.12797474861145,
369
+ "logits/rejected": -2.06542706489563,
370
+ "logps/chosen": -517.1934814453125,
371
+ "logps/rejected": -477.3455505371094,
372
+ "loss": 0.049,
373
+ "rewards/accuracies": 0.7437499761581421,
374
+ "rewards/chosen": -1.5975525379180908,
375
+ "rewards/margins": 1.0971721410751343,
376
+ "rewards/rejected": -2.6947245597839355,
377
+ "step": 230
378
+ },
379
+ {
380
+ "epoch": 0.54,
381
+ "learning_rate": 2.5594942438652685e-07,
382
+ "logits/chosen": -2.052783250808716,
383
+ "logits/rejected": -1.9777238368988037,
384
+ "logps/chosen": -483.03057861328125,
385
+ "logps/rejected": -474.64654541015625,
386
+ "loss": 0.0412,
387
+ "rewards/accuracies": 0.7406250238418579,
388
+ "rewards/chosen": -1.6363245248794556,
389
+ "rewards/margins": 1.1237612962722778,
390
+ "rewards/rejected": -2.7600855827331543,
391
+ "step": 240
392
+ },
393
+ {
394
+ "epoch": 0.57,
395
+ "learning_rate": 2.36123833901765e-07,
396
+ "logits/chosen": -2.0231640338897705,
397
+ "logits/rejected": -1.9347641468048096,
398
+ "logps/chosen": -503.3892517089844,
399
+ "logps/rejected": -482.2828674316406,
400
+ "loss": 0.0391,
401
+ "rewards/accuracies": 0.7749999761581421,
402
+ "rewards/chosen": -1.7641624212265015,
403
+ "rewards/margins": 1.1827863454818726,
404
+ "rewards/rejected": -2.946949005126953,
405
+ "step": 250
406
+ },
407
+ {
408
+ "epoch": 0.59,
409
+ "learning_rate": 2.1638553071961704e-07,
410
+ "logits/chosen": -1.9997365474700928,
411
+ "logits/rejected": -1.919660210609436,
412
+ "logps/chosen": -549.3283081054688,
413
+ "logps/rejected": -506.68707275390625,
414
+ "loss": 0.0364,
415
+ "rewards/accuracies": 0.7875000238418579,
416
+ "rewards/chosen": -1.842703104019165,
417
+ "rewards/margins": 1.4222638607025146,
418
+ "rewards/rejected": -3.2649669647216797,
419
+ "step": 260
420
+ },
421
+ {
422
+ "epoch": 0.61,
423
+ "learning_rate": 1.968586776117558e-07,
424
+ "logits/chosen": -2.023411273956299,
425
+ "logits/rejected": -1.9109961986541748,
426
+ "logps/chosen": -555.9891357421875,
427
+ "logps/rejected": -512.0736083984375,
428
+ "loss": 0.0427,
429
+ "rewards/accuracies": 0.765625,
430
+ "rewards/chosen": -1.7782520055770874,
431
+ "rewards/margins": 1.3554075956344604,
432
+ "rewards/rejected": -3.133659839630127,
433
+ "step": 270
434
+ },
435
+ {
436
+ "epoch": 0.63,
437
+ "learning_rate": 1.7766610723413684e-07,
438
+ "logits/chosen": -2.0062713623046875,
439
+ "logits/rejected": -1.9274126291275024,
440
+ "logps/chosen": -505.69110107421875,
441
+ "logps/rejected": -496.41864013671875,
442
+ "loss": 0.0421,
443
+ "rewards/accuracies": 0.7593749761581421,
444
+ "rewards/chosen": -1.7307058572769165,
445
+ "rewards/margins": 1.3143250942230225,
446
+ "rewards/rejected": -3.0450305938720703,
447
+ "step": 280
448
+ },
449
+ {
450
+ "epoch": 0.66,
451
+ "learning_rate": 1.589285494545514e-07,
452
+ "logits/chosen": -2.0006046295166016,
453
+ "logits/rejected": -1.9192641973495483,
454
+ "logps/chosen": -496.51055908203125,
455
+ "logps/rejected": -483.0226135253906,
456
+ "loss": 0.0435,
457
+ "rewards/accuracies": 0.762499988079071,
458
+ "rewards/chosen": -1.7974494695663452,
459
+ "rewards/margins": 1.1823097467422485,
460
+ "rewards/rejected": -2.979759454727173,
461
+ "step": 290
462
+ },
463
+ {
464
+ "epoch": 0.68,
465
+ "learning_rate": 1.4076387190766014e-07,
466
+ "logits/chosen": -1.9719831943511963,
467
+ "logits/rejected": -1.9076999425888062,
468
+ "logps/chosen": -481.85968017578125,
469
+ "logps/rejected": -481.77691650390625,
470
+ "loss": 0.046,
471
+ "rewards/accuracies": 0.753125011920929,
472
+ "rewards/chosen": -1.7264801263809204,
473
+ "rewards/margins": 1.1051851511001587,
474
+ "rewards/rejected": -2.831665277481079,
475
+ "step": 300
476
+ },
477
+ {
478
+ "epoch": 0.68,
479
+ "eval_logits/chosen": -1.991421103477478,
480
+ "eval_logits/rejected": -1.9484151601791382,
481
+ "eval_logps/chosen": -471.8780517578125,
482
+ "eval_logps/rejected": -544.56982421875,
483
+ "eval_loss": 0.055175185203552246,
484
+ "eval_rewards/accuracies": 0.71484375,
485
+ "eval_rewards/chosen": -2.1483840942382812,
486
+ "eval_rewards/margins": 0.7237809896469116,
487
+ "eval_rewards/rejected": -2.8721652030944824,
488
+ "eval_runtime": 53.342,
489
+ "eval_samples_per_second": 37.494,
490
+ "eval_steps_per_second": 0.6,
491
+ "step": 300
492
+ },
493
+ {
494
+ "epoch": 0.7,
495
+ "learning_rate": 1.232863385547543e-07,
496
+ "logits/chosen": -1.9471126794815063,
497
+ "logits/rejected": -1.8728282451629639,
498
+ "logps/chosen": -499.8617248535156,
499
+ "logps/rejected": -500.9742126464844,
500
+ "loss": 0.0415,
501
+ "rewards/accuracies": 0.768750011920929,
502
+ "rewards/chosen": -1.7383416891098022,
503
+ "rewards/margins": 1.4027913808822632,
504
+ "rewards/rejected": -3.1411330699920654,
505
+ "step": 310
506
+ },
507
+ {
508
+ "epoch": 0.72,
509
+ "learning_rate": 1.0660589091223854e-07,
510
+ "logits/chosen": -1.9511429071426392,
511
+ "logits/rejected": -1.8690084218978882,
512
+ "logps/chosen": -515.9614868164062,
513
+ "logps/rejected": -489.18853759765625,
514
+ "loss": 0.0424,
515
+ "rewards/accuracies": 0.765625,
516
+ "rewards/chosen": -1.7780258655548096,
517
+ "rewards/margins": 1.2824206352233887,
518
+ "rewards/rejected": -3.060446262359619,
519
+ "step": 320
520
+ },
521
+ {
522
+ "epoch": 0.75,
523
+ "learning_rate": 9.082745647022797e-08,
524
+ "logits/chosen": -1.9991905689239502,
525
+ "logits/rejected": -1.9214942455291748,
526
+ "logps/chosen": -518.4646606445312,
527
+ "logps/rejected": -501.2618713378906,
528
+ "loss": 0.0439,
529
+ "rewards/accuracies": 0.768750011920929,
530
+ "rewards/chosen": -1.7493177652359009,
531
+ "rewards/margins": 1.3284794092178345,
532
+ "rewards/rejected": -3.0777969360351562,
533
+ "step": 330
534
+ },
535
+ {
536
+ "epoch": 0.77,
537
+ "learning_rate": 7.605028865161809e-08,
538
+ "logits/chosen": -1.994573950767517,
539
+ "logits/rejected": -1.9131485223770142,
540
+ "logps/chosen": -523.8150024414062,
541
+ "logps/rejected": -497.4427795410156,
542
+ "loss": 0.0414,
543
+ "rewards/accuracies": 0.778124988079071,
544
+ "rewards/chosen": -1.7702951431274414,
545
+ "rewards/margins": 1.2257777452468872,
546
+ "rewards/rejected": -2.996073007583618,
547
+ "step": 340
548
+ },
549
+ {
550
+ "epoch": 0.79,
551
+ "learning_rate": 6.236734246357947e-08,
552
+ "logits/chosen": -1.9538482427597046,
553
+ "logits/rejected": -1.849259614944458,
554
+ "logps/chosen": -493.80450439453125,
555
+ "logps/rejected": -493.0531311035156,
556
+ "loss": 0.0447,
557
+ "rewards/accuracies": 0.7875000238418579,
558
+ "rewards/chosen": -1.6410820484161377,
559
+ "rewards/margins": 1.3493789434432983,
560
+ "rewards/rejected": -2.9904608726501465,
561
+ "step": 350
562
+ },
563
+ {
564
+ "epoch": 0.81,
565
+ "learning_rate": 4.986468976890992e-08,
566
+ "logits/chosen": -1.9808590412139893,
567
+ "logits/rejected": -1.8965733051300049,
568
+ "logps/chosen": -507.7171936035156,
569
+ "logps/rejected": -484.1426696777344,
570
+ "loss": 0.0423,
571
+ "rewards/accuracies": 0.768750011920929,
572
+ "rewards/chosen": -1.6426427364349365,
573
+ "rewards/margins": 1.3561906814575195,
574
+ "rewards/rejected": -2.998833417892456,
575
+ "step": 360
576
+ },
577
+ {
578
+ "epoch": 0.84,
579
+ "learning_rate": 3.8620977855448936e-08,
580
+ "logits/chosen": -1.9780423641204834,
581
+ "logits/rejected": -1.884387731552124,
582
+ "logps/chosen": -519.645751953125,
583
+ "logps/rejected": -487.0723571777344,
584
+ "loss": 0.0454,
585
+ "rewards/accuracies": 0.71875,
586
+ "rewards/chosen": -1.856579065322876,
587
+ "rewards/margins": 1.1135355234146118,
588
+ "rewards/rejected": -2.9701147079467773,
589
+ "step": 370
590
+ },
591
+ {
592
+ "epoch": 0.86,
593
+ "learning_rate": 2.8706934709395893e-08,
594
+ "logits/chosen": -1.99289870262146,
595
+ "logits/rejected": -1.9017293453216553,
596
+ "logps/chosen": -520.5479736328125,
597
+ "logps/rejected": -500.9642028808594,
598
+ "loss": 0.0424,
599
+ "rewards/accuracies": 0.8062499761581421,
600
+ "rewards/chosen": -1.6913812160491943,
601
+ "rewards/margins": 1.3811187744140625,
602
+ "rewards/rejected": -3.072500228881836,
603
+ "step": 380
604
+ },
605
+ {
606
+ "epoch": 0.88,
607
+ "learning_rate": 2.0184924104583612e-08,
608
+ "logits/chosen": -1.988921880722046,
609
+ "logits/rejected": -1.891033411026001,
610
+ "logps/chosen": -521.970458984375,
611
+ "logps/rejected": -489.20501708984375,
612
+ "loss": 0.0425,
613
+ "rewards/accuracies": 0.796875,
614
+ "rewards/chosen": -1.6584064960479736,
615
+ "rewards/margins": 1.3673207759857178,
616
+ "rewards/rejected": -3.0257275104522705,
617
+ "step": 390
618
+ },
619
+ {
620
+ "epoch": 0.91,
621
+ "learning_rate": 1.3108553306396263e-08,
622
+ "logits/chosen": -2.0064077377319336,
623
+ "logits/rejected": -1.9138851165771484,
624
+ "logps/chosen": -516.9049072265625,
625
+ "logps/rejected": -482.49859619140625,
626
+ "loss": 0.0439,
627
+ "rewards/accuracies": 0.7437499761581421,
628
+ "rewards/chosen": -1.8265987634658813,
629
+ "rewards/margins": 1.1020760536193848,
630
+ "rewards/rejected": -2.9286749362945557,
631
+ "step": 400
632
+ },
633
+ {
634
+ "epoch": 0.91,
635
+ "eval_logits/chosen": -1.9848593473434448,
636
+ "eval_logits/rejected": -1.94070303440094,
637
+ "eval_logps/chosen": -476.7722473144531,
638
+ "eval_logps/rejected": -549.7876586914062,
639
+ "eval_loss": 0.054434459656476974,
640
+ "eval_rewards/accuracies": 0.70703125,
641
+ "eval_rewards/chosen": -2.1973259449005127,
642
+ "eval_rewards/margins": 0.7270177602767944,
643
+ "eval_rewards/rejected": -2.9243435859680176,
644
+ "eval_runtime": 53.3853,
645
+ "eval_samples_per_second": 37.463,
646
+ "eval_steps_per_second": 0.599,
647
+ "step": 400
648
+ },
649
+ {
650
+ "epoch": 0.93,
651
+ "learning_rate": 7.522335858048705e-09,
652
+ "logits/chosen": -1.963323950767517,
653
+ "logits/rejected": -1.8955034017562866,
654
+ "logps/chosen": -518.6998901367188,
655
+ "logps/rejected": -522.4810791015625,
656
+ "loss": 0.0451,
657
+ "rewards/accuracies": 0.784375011920929,
658
+ "rewards/chosen": -1.8480857610702515,
659
+ "rewards/margins": 1.2641350030899048,
660
+ "rewards/rejected": -3.112220287322998,
661
+ "step": 410
662
+ },
663
+ {
664
+ "epoch": 0.95,
665
+ "learning_rate": 3.4614115704533766e-09,
666
+ "logits/chosen": -1.9797197580337524,
667
+ "logits/rejected": -1.9050052165985107,
668
+ "logps/chosen": -495.5513610839844,
669
+ "logps/rejected": -489.12384033203125,
670
+ "loss": 0.0427,
671
+ "rewards/accuracies": 0.7718750238418579,
672
+ "rewards/chosen": -1.9035917520523071,
673
+ "rewards/margins": 1.1141357421875,
674
+ "rewards/rejected": -3.0177273750305176,
675
+ "step": 420
676
+ },
677
+ {
678
+ "epoch": 0.97,
679
+ "learning_rate": 9.513254770636137e-10,
680
+ "logits/chosen": -1.9577579498291016,
681
+ "logits/rejected": -1.8832132816314697,
682
+ "logps/chosen": -509.35406494140625,
683
+ "logps/rejected": -482.31640625,
684
+ "loss": 0.0437,
685
+ "rewards/accuracies": 0.737500011920929,
686
+ "rewards/chosen": -1.8531081676483154,
687
+ "rewards/margins": 1.0920084714889526,
688
+ "rewards/rejected": -2.9451169967651367,
689
+ "step": 430
690
+ },
691
+ {
692
+ "epoch": 1.0,
693
+ "learning_rate": 7.867144166728844e-12,
694
+ "logits/chosen": -1.9990612268447876,
695
+ "logits/rejected": -1.9326860904693604,
696
+ "logps/chosen": -519.8091430664062,
697
+ "logps/rejected": -503.39385986328125,
698
+ "loss": 0.0409,
699
+ "rewards/accuracies": 0.7437499761581421,
700
+ "rewards/chosen": -1.7259622812271118,
701
+ "rewards/margins": 1.2851760387420654,
702
+ "rewards/rejected": -3.0111382007598877,
703
+ "step": 440
704
+ },
705
  {
706
  "epoch": 1.0,
707
+ "step": 441,
708
  "total_flos": 0.0,
709
+ "train_loss": 0.07998780592802972,
710
+ "train_runtime": 7378.9712,
711
+ "train_samples_per_second": 15.318,
712
+ "train_steps_per_second": 0.06
713
  }
714
  ],
715
  "logging_steps": 10,
716
+ "max_steps": 441,
 
717
  "num_train_epochs": 1,
718
  "save_steps": 100,
719
  "total_flos": 0.0,
 
720
  "trial_name": null,
721
  "trial_params": null
722
  }
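trainer_state.json mixes per-step training logs with evaluation entries every 100 steps (eval_steps: 100); the evaluation rows are the ones carrying eval_* keys. A small sketch for pulling out the evaluation curve, assuming a local copy of the file:

```python
# Print the evaluation checkpoints (steps 100/200/300/400 in this commit) from
# trainer_state.json's log_history.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["step"], entry["eval_loss"], entry["eval_rewards/margins"])
```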
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c8c151d7becb900bed631e41dff74cbdb1243adce5d3b22205a355b75f2b0912
+oid sha256:d1bc12ab9a777f3e948ad4888c0b78fbb69873a1cca9e5f6148ba6edfc1e8386
 size 5944
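training_args.bin is the TrainingArguments object pickled by the transformers Trainer (hence the small, unchanged 5944-byte size; only its hash changes). A sketch for inspecting it locally; unpickling runs arbitrary code, so only do this for files you trust, and transformers must be importable:

```python
# Inspect the serialized TrainingArguments; torch.load unpickles the object.
import torch

args = torch.load("training_args.bin")
print(args.learning_rate, args.seed, args.gradient_accumulation_steps)
```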