Training in progress, step 39600
- adapter_model.safetensors +1 -1
- trainer_log.jsonl +41 -0
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:402e03e85246688d1e40ab33da5915b51f33208f62ded640aedeb430fc4477cc
 size 460928
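adapter_model.safetensors is tracked with Git LFS, so the diff above only swaps the pointer's SHA-256 object id (the previous hash is not shown in this view) while the object size stays at 460928 bytes. Below is a minimal Python sketch of how a downloaded copy could be checked against the new pointer; the local file path is an assumption, not part of this commit:

```python
import hashlib

# Assumed local path of the downloaded LFS object (hypothetical).
ADAPTER_PATH = "adapter_model.safetensors"

# Values taken from the new LFS pointer in this commit.
EXPECTED_OID = "402e03e85246688d1e40ab33da5915b51f33208f62ded640aedeb430fc4477cc"
EXPECTED_SIZE = 460928


def verify_lfs_object(path: str) -> bool:
    """Check that the file's byte size and SHA-256 match the LFS pointer."""
    sha = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return size == EXPECTED_SIZE and sha.hexdigest() == EXPECTED_OID


if __name__ == "__main__":
    print("pointer matches:", verify_lfs_object(ADAPTER_PATH))
```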
trainer_log.jsonl CHANGED
@@ -8075,3 +8075,44 @@
 {"current_steps": 39395, "total_steps": 40000, "loss": 0.0, "lr": 0.00016986515083774467, "epoch": 279.39928698752226, "percentage": 98.49, "elapsed_time": "4:47:58", "remaining_time": "0:04:25", "throughput": 5820.45, "total_tokens": 100571184}
 {"current_steps": 39400, "total_steps": 40000, "loss": 0.0, "lr": 0.00016707417762611975, "epoch": 279.4349376114082, "percentage": 98.5, "elapsed_time": "4:48:01", "remaining_time": "0:04:23", "throughput": 5820.55, "total_tokens": 100585296}
 {"current_steps": 39400, "total_steps": 40000, "eval_loss": 0.7336431741714478, "epoch": 279.4349376114082, "percentage": 98.5, "elapsed_time": "4:48:04", "remaining_time": "0:04:23", "throughput": 5819.45, "total_tokens": 100585296}
+{"current_steps": 39405, "total_steps": 40000, "loss": 0.0, "lr": 0.00016430631053459543, "epoch": 279.47058823529414, "percentage": 98.51, "elapsed_time": "4:48:07", "remaining_time": "0:04:21", "throughput": 5818.96, "total_tokens": 100597488}
+{"current_steps": 39410, "total_steps": 40000, "loss": 0.0, "lr": 0.0001615615499899803, "epoch": 279.50623885918003, "percentage": 98.52, "elapsed_time": "4:48:09", "remaining_time": "0:04:18", "throughput": 5819.02, "total_tokens": 100610640}
+{"current_steps": 39415, "total_steps": 40000, "loss": 0.0, "lr": 0.00015883989641556905, "epoch": 279.541889483066, "percentage": 98.54, "elapsed_time": "4:48:12", "remaining_time": "0:04:16", "throughput": 5819.06, "total_tokens": 100623152}
+{"current_steps": 39420, "total_steps": 40000, "loss": 0.0, "lr": 0.00015614135023105934, "epoch": 279.57754010695186, "percentage": 98.55, "elapsed_time": "4:48:14", "remaining_time": "0:04:14", "throughput": 5819.06, "total_tokens": 100635216}
+{"current_steps": 39425, "total_steps": 40000, "loss": 0.0, "lr": 0.00015346591185261827, "epoch": 279.6131907308378, "percentage": 98.56, "elapsed_time": "4:48:16", "remaining_time": "0:04:12", "throughput": 5819.11, "total_tokens": 100647888}
+{"current_steps": 39430, "total_steps": 40000, "loss": 0.0, "lr": 0.00015081358169281576, "epoch": 279.6488413547237, "percentage": 98.58, "elapsed_time": "4:48:18", "remaining_time": "0:04:10", "throughput": 5819.17, "total_tokens": 100661040}
+{"current_steps": 39435, "total_steps": 40000, "loss": 0.0, "lr": 0.00014818436016069135, "epoch": 279.6844919786096, "percentage": 98.59, "elapsed_time": "4:48:20", "remaining_time": "0:04:07", "throughput": 5819.17, "total_tokens": 100672848}
+{"current_steps": 39440, "total_steps": 40000, "loss": 0.0, "lr": 0.00014557824766168735, "epoch": 279.72014260249557, "percentage": 98.6, "elapsed_time": "4:48:22", "remaining_time": "0:04:05", "throughput": 5819.27, "total_tokens": 100686992}
+{"current_steps": 39445, "total_steps": 40000, "loss": 0.0, "lr": 0.00014299524459769896, "epoch": 279.75579322638146, "percentage": 98.61, "elapsed_time": "4:48:24", "remaining_time": "0:04:03", "throughput": 5819.36, "total_tokens": 100700912}
+{"current_steps": 39450, "total_steps": 40000, "loss": 0.0, "lr": 0.0001404353513670742, "epoch": 279.7914438502674, "percentage": 98.62, "elapsed_time": "4:48:26", "remaining_time": "0:04:01", "throughput": 5819.42, "total_tokens": 100713936}
+{"current_steps": 39455, "total_steps": 40000, "loss": 0.0, "lr": 0.0001378985683645806, "epoch": 279.8270944741533, "percentage": 98.64, "elapsed_time": "4:48:28", "remaining_time": "0:03:59", "throughput": 5819.45, "total_tokens": 100726448}
+{"current_steps": 39460, "total_steps": 40000, "loss": 0.0, "lr": 0.0001353848959813886, "epoch": 279.8627450980392, "percentage": 98.65, "elapsed_time": "4:48:30", "remaining_time": "0:03:56", "throughput": 5819.56, "total_tokens": 100740720}
+{"current_steps": 39465, "total_steps": 40000, "loss": 0.0, "lr": 0.00013289433460517142, "epoch": 279.8983957219251, "percentage": 98.66, "elapsed_time": "4:48:32", "remaining_time": "0:03:54", "throughput": 5819.63, "total_tokens": 100754096}
+{"current_steps": 39470, "total_steps": 40000, "loss": 0.0, "lr": 0.00013042688462000518, "epoch": 279.93404634581105, "percentage": 98.67, "elapsed_time": "4:48:34", "remaining_time": "0:03:52", "throughput": 5819.7, "total_tokens": 100767408}
+{"current_steps": 39475, "total_steps": 40000, "loss": 0.0, "lr": 0.0001279825464063855, "epoch": 279.969696969697, "percentage": 98.69, "elapsed_time": "4:48:36", "remaining_time": "0:03:50", "throughput": 5819.78, "total_tokens": 100780624}
+{"current_steps": 39480, "total_steps": 40000, "loss": 0.0, "lr": 0.00012556132034126087, "epoch": 280.0, "percentage": 98.7, "elapsed_time": "4:48:38", "remaining_time": "0:03:48", "throughput": 5819.75, "total_tokens": 100790968}
+{"current_steps": 39485, "total_steps": 40000, "loss": 0.0, "lr": 0.0001231632067980326, "epoch": 280.03565062388594, "percentage": 98.71, "elapsed_time": "4:48:41", "remaining_time": "0:03:45", "throughput": 5819.79, "total_tokens": 100805272}
+{"current_steps": 39490, "total_steps": 40000, "loss": 0.0, "lr": 0.00012078820614650486, "epoch": 280.0713012477718, "percentage": 98.72, "elapsed_time": "4:48:43", "remaining_time": "0:03:43", "throughput": 5819.87, "total_tokens": 100818936}
+{"current_steps": 39495, "total_steps": 40000, "loss": 0.0, "lr": 0.00011843631875291804, "epoch": 280.10695187165777, "percentage": 98.74, "elapsed_time": "4:48:45", "remaining_time": "0:03:41", "throughput": 5819.9, "total_tokens": 100831288}
+{"current_steps": 39500, "total_steps": 40000, "loss": 0.0, "lr": 0.00011610754497999863, "epoch": 280.14260249554366, "percentage": 98.75, "elapsed_time": "4:48:47", "remaining_time": "0:03:39", "throughput": 5819.92, "total_tokens": 100843448}
+{"current_steps": 39505, "total_steps": 40000, "loss": 0.0, "lr": 0.0001138018851868594, "epoch": 280.1782531194296, "percentage": 98.76, "elapsed_time": "4:48:49", "remaining_time": "0:03:37", "throughput": 5819.98, "total_tokens": 100856824}
+{"current_steps": 39510, "total_steps": 40000, "loss": 0.0, "lr": 0.0001115193397290326, "epoch": 280.2139037433155, "percentage": 98.78, "elapsed_time": "4:48:51", "remaining_time": "0:03:34", "throughput": 5820.06, "total_tokens": 100870232}
+{"current_steps": 39515, "total_steps": 40000, "loss": 0.0, "lr": 0.00010925990895856996, "epoch": 280.2495543672014, "percentage": 98.79, "elapsed_time": "4:48:53", "remaining_time": "0:03:32", "throughput": 5820.12, "total_tokens": 100883352}
+{"current_steps": 39520, "total_steps": 40000, "loss": 0.0, "lr": 0.00010702359322385946, "epoch": 280.28520499108737, "percentage": 98.8, "elapsed_time": "4:48:55", "remaining_time": "0:03:30", "throughput": 5820.13, "total_tokens": 100895480}
+{"current_steps": 39525, "total_steps": 40000, "loss": 0.0, "lr": 0.00010481039286977523, "epoch": 280.32085561497325, "percentage": 98.81, "elapsed_time": "4:48:57", "remaining_time": "0:03:28", "throughput": 5820.16, "total_tokens": 100907768}
+{"current_steps": 39530, "total_steps": 40000, "loss": 0.0, "lr": 0.00010262030823764423, "epoch": 280.3565062388592, "percentage": 98.83, "elapsed_time": "4:48:59", "remaining_time": "0:03:26", "throughput": 5820.2, "total_tokens": 100920408}
+{"current_steps": 39535, "total_steps": 40000, "loss": 0.0, "lr": 0.00010045333966517966, "epoch": 280.3921568627451, "percentage": 98.84, "elapsed_time": "4:49:01", "remaining_time": "0:03:23", "throughput": 5820.2, "total_tokens": 100932248}
+{"current_steps": 39540, "total_steps": 40000, "loss": 0.0, "lr": 9.83094874865642e-05, "epoch": 280.427807486631, "percentage": 98.85, "elapsed_time": "4:49:03", "remaining_time": "0:03:21", "throughput": 5820.23, "total_tokens": 100944728}
+{"current_steps": 39545, "total_steps": 40000, "loss": 0.0, "lr": 9.618875203241672e-05, "epoch": 280.4634581105169, "percentage": 98.86, "elapsed_time": "4:49:05", "remaining_time": "0:03:19", "throughput": 5820.32, "total_tokens": 100958328}
+{"current_steps": 39550, "total_steps": 40000, "loss": 0.0, "lr": 9.409113362977561e-05, "epoch": 280.49910873440285, "percentage": 98.88, "elapsed_time": "4:49:07", "remaining_time": "0:03:17", "throughput": 5820.35, "total_tokens": 100970904}
+{"current_steps": 39555, "total_steps": 40000, "loss": 0.0, "lr": 9.20166326020988e-05, "epoch": 280.5347593582888, "percentage": 98.89, "elapsed_time": "4:49:10", "remaining_time": "0:03:15", "throughput": 5820.44, "total_tokens": 100984728}
+{"current_steps": 39560, "total_steps": 40000, "loss": 0.0, "lr": 8.996524926933035e-05, "epoch": 280.5704099821747, "percentage": 98.9, "elapsed_time": "4:49:12", "remaining_time": "0:03:12", "throughput": 5820.49, "total_tokens": 100997560}
+{"current_steps": 39565, "total_steps": 40000, "loss": 0.0, "lr": 8.793698394781723e-05, "epoch": 280.6060606060606, "percentage": 98.91, "elapsed_time": "4:49:14", "remaining_time": "0:03:10", "throughput": 5820.5, "total_tokens": 101009624}
+{"current_steps": 39570, "total_steps": 40000, "loss": 0.0, "lr": 8.593183695030926e-05, "epoch": 280.6417112299465, "percentage": 98.92, "elapsed_time": "4:49:16", "remaining_time": "0:03:08", "throughput": 5820.46, "total_tokens": 101020504}
+{"current_steps": 39575, "total_steps": 40000, "loss": 0.0, "lr": 8.39498085860757e-05, "epoch": 280.67736185383245, "percentage": 98.94, "elapsed_time": "4:49:18", "remaining_time": "0:03:06", "throughput": 5820.49, "total_tokens": 101033112}
+{"current_steps": 39580, "total_steps": 40000, "loss": 0.0, "lr": 8.199089916072211e-05, "epoch": 280.71301247771834, "percentage": 98.95, "elapsed_time": "4:49:20", "remaining_time": "0:03:04", "throughput": 5820.54, "total_tokens": 101046040}
+{"current_steps": 39585, "total_steps": 40000, "loss": 0.0, "lr": 8.005510897637346e-05, "epoch": 280.7486631016043, "percentage": 98.96, "elapsed_time": "4:49:22", "remaining_time": "0:03:02", "throughput": 5820.59, "total_tokens": 101058904}
+{"current_steps": 39590, "total_steps": 40000, "loss": 0.0, "lr": 7.8142438331541e-05, "epoch": 280.7843137254902, "percentage": 98.98, "elapsed_time": "4:49:24", "remaining_time": "0:02:59", "throughput": 5820.61, "total_tokens": 101071064}
+{"current_steps": 39595, "total_steps": 40000, "loss": 0.0, "lr": 7.625288752117209e-05, "epoch": 280.8199643493761, "percentage": 98.99, "elapsed_time": "4:49:26", "remaining_time": "0:02:57", "throughput": 5820.71, "total_tokens": 101084920}
+{"current_steps": 39600, "total_steps": 40000, "loss": 0.0, "lr": 7.4386456836667e-05, "epoch": 280.85561497326205, "percentage": 99.0, "elapsed_time": "4:49:28", "remaining_time": "0:02:55", "throughput": 5820.68, "total_tokens": 101096120}
+{"current_steps": 39600, "total_steps": 40000, "eval_loss": 0.7351030707359314, "epoch": 280.85561497326205, "percentage": 99.0, "elapsed_time": "4:49:31", "remaining_time": "0:02:55", "throughput": 5819.57, "total_tokens": 101096120}
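trainer_log.jsonl stores one JSON object per line: training rows carry "loss", "lr", and throughput fields, while evaluation rows carry "eval_loss" instead, as in the added records above. A minimal sketch for pulling the eval_loss checkpoints out of such a log; the local file path is an assumption:

```python
import json

LOG_PATH = "trainer_log.jsonl"  # assumed local path to the log shown in this diff

eval_points = []
with open(LOG_PATH, "r", encoding="utf-8") as f:
    for line in f:
        line = line.strip()
        if not line:
            continue
        record = json.loads(line)
        # Evaluation records are distinguished by the "eval_loss" key.
        if "eval_loss" in record:
            eval_points.append((record["current_steps"], record["eval_loss"]))

for step, loss in eval_points:
    print(f"step {step}: eval_loss={loss}")
```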