shaurya-06 committed
Commit 721d7d8
1 Parent(s): d91a210

shaurya-06/adapter

README.md CHANGED
@@ -46,7 +46,7 @@ The following hyperparameters were used during training:
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_ratio: 0.03
- - num_epochs: 10
+ - num_epochs: 15

  ### Training results

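For orientation, here is a minimal sketch (not part of the commit) of how the hyperparameters in this README hunk, together with the `train_batch_size` of 4 recorded in trainer_state.json below, could be expressed as a `transformers.TrainingArguments` object. The `output_dir` and `learning_rate` values are assumptions; the learning rate is not among the lines shown above.

```python
from transformers import TrainingArguments

# Sketch only: mirrors the hyperparameters listed in the README diff above.
# output_dir and learning_rate are assumed values, not taken from this commit.
training_args = TrainingArguments(
    output_dir="adapter-output",    # hypothetical path
    num_train_epochs=15,            # changed from 10 to 15 in this commit
    per_device_train_batch_size=4,  # "train_batch_size" in trainer_state.json
    learning_rate=2e-4,             # assumption; not shown in the hunk above
    lr_scheduler_type="cosine",
    warmup_ratio=0.03,
    adam_beta1=0.9,                 # Adam with betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    logging_steps=15,               # "logging_steps" in the new trainer_state.json
)
```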
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b89ebdcdd1526c540859ee6d1c964e45224b8e532ab7c36722c70ab6c9dcd18c
+ oid sha256:40e249aa5cad4bb1b7d9575ed7d47d455a84d71b94d53384e03bc1c6e5effa39
  size 15734784
runs/Apr28_21-24-34_757a20c23d9b/events.out.tfevents.1714339477.757a20c23d9b.200681.15 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f663efec6bd8ba8c52344b4f9163479e5c7b28256c75f9a4ee9e39094c1b696b
+ size 7566
runs/Apr28_21-25-46_757a20c23d9b/events.out.tfevents.1714339550.757a20c23d9b.200681.16 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7609afb8c70a30e2dc5211ecc898d0051b5aa52ecf354601ec6c1a02e28c4a6e
+ size 8416
trainer_state.json CHANGED
@@ -1,197 +1,113 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 9.6,
+ "epoch": 14.4,
  "eval_steps": 500,
- "global_step": 120,
+ "global_step": 180,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
- {
- "epoch": 0.4,
- "grad_norm": 0.46419957280158997,
- "learning_rate": 0.0001999633286223284,
- "loss": 0.5697,
- "step": 5
- },
- {
- "epoch": 0.8,
- "grad_norm": 0.47279733419418335,
- "learning_rate": 0.00019868265225415265,
- "loss": 0.581,
- "step": 10
- },
  {
  "epoch": 1.2,
- "grad_norm": 0.34838807582855225,
- "learning_rate": 0.00019559521426716118,
- "loss": 0.5531,
+ "grad_norm": 0.8679821491241455,
+ "learning_rate": 0.00019868265225415265,
+ "loss": 4.9095,
  "step": 15
  },
- {
- "epoch": 1.6,
- "grad_norm": 0.4025287628173828,
- "learning_rate": 0.00019075754196709572,
- "loss": 0.528,
- "step": 20
- },
- {
- "epoch": 2.0,
- "grad_norm": 0.5065444707870483,
- "learning_rate": 0.0001842582073616649,
- "loss": 0.5522,
- "step": 25
- },
  {
  "epoch": 2.4,
- "grad_norm": 0.47670507431030273,
- "learning_rate": 0.00017621620551276366,
- "loss": 0.5206,
+ "grad_norm": 1.064207911491394,
+ "learning_rate": 0.00019075754196709572,
+ "loss": 3.2019,
  "step": 30
  },
- {
- "epoch": 2.8,
- "grad_norm": 0.39348119497299194,
- "learning_rate": 0.00016677877587886956,
- "loss": 0.5305,
- "step": 35
- },
- {
- "epoch": 3.2,
- "grad_norm": 0.5051579475402832,
- "learning_rate": 0.00015611870653623825,
- "loss": 0.5061,
- "step": 40
- },
  {
  "epoch": 3.6,
- "grad_norm": 0.43032407760620117,
- "learning_rate": 0.00014443117063539038,
- "loss": 0.4894,
+ "grad_norm": 0.6014457941055298,
+ "learning_rate": 0.00017621620551276366,
+ "loss": 1.7706,
  "step": 45
  },
- {
- "epoch": 4.0,
- "grad_norm": 0.42734888195991516,
- "learning_rate": 0.000131930153013598,
- "loss": 0.4779,
- "step": 50
- },
- {
- "epoch": 4.4,
- "grad_norm": 0.3881720006465912,
- "learning_rate": 0.00011884453238783185,
- "loss": 0.4904,
- "step": 55
- },
  {
  "epoch": 4.8,
- "grad_norm": 0.4254741072654724,
- "learning_rate": 0.00010541389085854176,
- "loss": 0.4678,
+ "grad_norm": 0.4954649806022644,
+ "learning_rate": 0.00015611870653623825,
+ "loss": 1.2936,
  "step": 60
  },
- {
- "epoch": 5.2,
- "grad_norm": 0.3979746997356415,
- "learning_rate": 9.18841274472569e-05,
- "loss": 0.4605,
- "step": 65
- },
- {
- "epoch": 5.6,
- "grad_norm": 0.5344672799110413,
- "learning_rate": 7.85029559788976e-05,
- "loss": 0.4667,
- "step": 70
- },
  {
  "epoch": 6.0,
- "grad_norm": 0.44632646441459656,
- "learning_rate": 6.551536973720298e-05,
- "loss": 0.4588,
+ "grad_norm": 0.418344110250473,
+ "learning_rate": 0.000131930153013598,
+ "loss": 1.0465,
  "step": 75
  },
- {
- "epoch": 6.4,
- "grad_norm": 0.45458871126174927,
- "learning_rate": 5.3159155930021e-05,
- "loss": 0.4698,
- "step": 80
- },
- {
- "epoch": 6.8,
- "grad_norm": 0.46564164757728577,
- "learning_rate": 4.16605420892506e-05,
- "loss": 0.4429,
- "step": 85
- },
  {
  "epoch": 7.2,
- "grad_norm": 0.4069627523422241,
- "learning_rate": 3.123005411465766e-05,
- "loss": 0.451,
+ "grad_norm": 0.40778854489326477,
+ "learning_rate": 0.00010541389085854176,
+ "loss": 0.9186,
  "step": 90
  },
  {
- "epoch": 7.6,
- "grad_norm": 0.3835998475551605,
- "learning_rate": 2.205866179584084e-05,
- "loss": 0.4238,
- "step": 95
+ "epoch": 8.4,
+ "grad_norm": 0.3741260766983032,
+ "learning_rate": 7.85029559788976e-05,
+ "loss": 0.8364,
+ "step": 105
  },
  {
- "epoch": 8.0,
- "grad_norm": 0.4063926935195923,
- "learning_rate": 1.4314282383241096e-05,
- "loss": 0.4493,
- "step": 100
+ "epoch": 9.6,
+ "grad_norm": 0.4041515290737152,
+ "learning_rate": 5.3159155930021e-05,
+ "loss": 0.7861,
+ "step": 120
  },
  {
- "epoch": 8.4,
- "grad_norm": 0.3951762318611145,
- "learning_rate": 8.138706223637827e-06,
- "loss": 0.4394,
- "step": 105
+ "epoch": 10.8,
+ "grad_norm": 0.3697131872177124,
+ "learning_rate": 3.123005411465766e-05,
+ "loss": 0.7488,
+ "step": 135
  },
  {
- "epoch": 8.8,
- "grad_norm": 0.408200204372406,
- "learning_rate": 3.6450007480777093e-06,
- "loss": 0.4367,
- "step": 110
+ "epoch": 12.0,
+ "grad_norm": 0.4183999001979828,
+ "learning_rate": 1.4314282383241096e-05,
+ "loss": 0.7274,
+ "step": 150
  },
  {
- "epoch": 9.2,
- "grad_norm": 0.5846555233001709,
- "learning_rate": 9.154403421193225e-07,
- "loss": 0.429,
- "step": 115
+ "epoch": 13.2,
+ "grad_norm": 0.4263518154621124,
+ "learning_rate": 3.6450007480777093e-06,
+ "loss": 0.7245,
+ "step": 165
  },
  {
- "epoch": 9.6,
- "grad_norm": 0.3439011573791504,
+ "epoch": 14.4,
+ "grad_norm": 0.3251570165157318,
  "learning_rate": 0.0,
- "loss": 0.4372,
- "step": 120
+ "loss": 0.7288,
+ "step": 180
  },
  {
- "epoch": 9.6,
- "step": 120,
- "total_flos": 77528353996800.0,
- "train_loss": 0.4846629718939463,
- "train_runtime": 54.9394,
- "train_samples_per_second": 18.202,
- "train_steps_per_second": 2.184
+ "epoch": 14.4,
+ "step": 180,
+ "total_flos": 116292530995200.0,
+ "train_loss": 1.4744000752766928,
+ "train_runtime": 81.674,
+ "train_samples_per_second": 18.366,
+ "train_steps_per_second": 2.204
  }
  ],
- "logging_steps": 5,
- "max_steps": 120,
+ "logging_steps": 15,
+ "max_steps": 180,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 10,
+ "num_train_epochs": 15,
  "save_steps": 500,
- "total_flos": 77528353996800.0,
+ "total_flos": 116292530995200.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:99d9de74d1a3b9b5e5e92d75fa38b4585c6562b7ce7aaa44691e6d0f1d9a8f5b
+ oid sha256:c67cb9db299dcc048f1694cd0a11b03c9a64221d6c98c783770f63b50ac522de
  size 5176