joelniklaus committed
Commit e010e57
1 Parent(s): 821af66

Training in progress, step 450000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:02a8f872099d0391c9507ce223e1e3ff4a42eb12b57cc249f652e861b7223e3f
+ oid sha256:50e009455c7cefb9c30e103d277cfaec67cd0645bb2b8cd1a83101dac2bdc6aa
  size 3480942553
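
All of the checkpoint binaries in this commit are tracked with Git LFS, so each diff only shows the pointer file changing: the spec version line, the sha256 object id, and the byte size. As a minimal sketch (not part of this repo), a locally downloaded file can be checked against such a pointer; the path below is a placeholder, and the oid/size are copied from the new optimizer.pt pointer above.

```python
import hashlib
import os


def matches_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the local file has the size and sha256 recorded in an LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid


# oid/size taken from the optimizer.pt pointer above; the local path is hypothetical.
print(matches_lfs_pointer(
    "last-checkpoint/optimizer.pt",
    "50e009455c7cefb9c30e103d277cfaec67cd0645bb2b8cd1a83101dac2bdc6aa",
    3480942553,
))
```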
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:37a0263f3d21f63b75bfbe41e9f1a282e52db05c95edbe95df44515b1ea2c1e2
+ oid sha256:56be5e7262b73181eb5e8e0d32e8f43f7a3e8e2c08ec528fa753ff63fb8e7b3a
  size 1740493675
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45bdf8a607dd22dd050193b5e8274ec1988a5df7ee95345fb0b30620efbfb73c
+ oid sha256:fd442869e8775adda4fa0ba41dd40ec6f6fa57771db7950cf5ce294923af00b6
  size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45bdf8a607dd22dd050193b5e8274ec1988a5df7ee95345fb0b30620efbfb73c
+ oid sha256:fd442869e8775adda4fa0ba41dd40ec6f6fa57771db7950cf5ce294923af00b6
  size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45bdf8a607dd22dd050193b5e8274ec1988a5df7ee95345fb0b30620efbfb73c
+ oid sha256:fd442869e8775adda4fa0ba41dd40ec6f6fa57771db7950cf5ce294923af00b6
  size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45bdf8a607dd22dd050193b5e8274ec1988a5df7ee95345fb0b30620efbfb73c
+ oid sha256:fd442869e8775adda4fa0ba41dd40ec6f6fa57771db7950cf5ce294923af00b6
  size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45bdf8a607dd22dd050193b5e8274ec1988a5df7ee95345fb0b30620efbfb73c
+ oid sha256:fd442869e8775adda4fa0ba41dd40ec6f6fa57771db7950cf5ce294923af00b6
  size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45bdf8a607dd22dd050193b5e8274ec1988a5df7ee95345fb0b30620efbfb73c
+ oid sha256:fd442869e8775adda4fa0ba41dd40ec6f6fa57771db7950cf5ce294923af00b6
  size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45bdf8a607dd22dd050193b5e8274ec1988a5df7ee95345fb0b30620efbfb73c
+ oid sha256:fd442869e8775adda4fa0ba41dd40ec6f6fa57771db7950cf5ce294923af00b6
  size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45bdf8a607dd22dd050193b5e8274ec1988a5df7ee95345fb0b30620efbfb73c
+ oid sha256:fd442869e8775adda4fa0ba41dd40ec6f6fa57771db7950cf5ce294923af00b6
  size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e4ecef8b58c710458716a0153f8519567dd2a15c4728bc445f0af4d3fb15782
+ oid sha256:78e735efa7e40e0dd22dcac5cb3724b0cbe120563d603ea4b62f22b0f40fc602
  size 623
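
Together, optimizer.pt, scheduler.pt, the eight rng_state_*.pth files (one per training process), pytorch_model.bin, and the trainer_state.json below make up the last-checkpoint directory written during training. A minimal sketch of inspecting the pushed weights locally, assuming the repository has been cloned with git-lfs; the path is a placeholder.

```python
import torch

# Placeholder path: assumes the repository was cloned locally with git-lfs installed.
state_dict = torch.load("last-checkpoint/pytorch_model.bin", map_location="cpu")

print(f"{len(state_dict)} tensors in the model checkpoint")
for name, tensor in list(state_dict.items())[:3]:
    # Print a few parameter names and shapes as a sanity check.
    print(name, tuple(tensor.shape))
```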
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.4,
- "global_step": 400000,
+ "epoch": 0.45,
+ "global_step": 450000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2470,11 +2470,319 @@
  "eval_samples_per_second": 25.615,
  "eval_steps_per_second": 0.405,
  "step": 400000
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.993324133116726e-05,
+ "loss": 0.8436,
+ "step": 401000
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.978149344295242e-05,
+ "loss": 0.837,
+ "step": 402000
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.962952922749457e-05,
+ "loss": 0.846,
+ "step": 403000
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.947735034665002e-05,
+ "loss": 0.8244,
+ "step": 404000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.932495846462261e-05,
+ "loss": 0.8232,
+ "step": 405000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.917235524794558e-05,
+ "loss": 0.8376,
+ "step": 406000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.901954236546323e-05,
+ "loss": 0.8296,
+ "step": 407000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.886652148831279e-05,
+ "loss": 0.839,
+ "step": 408000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.871329428990602e-05,
+ "loss": 0.8564,
+ "step": 409000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.855986244591104e-05,
+ "loss": 0.8704,
+ "step": 410000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.840622763423391e-05,
+ "loss": 0.847,
+ "step": 411000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.825239153500029e-05,
+ "loss": 0.8419,
+ "step": 412000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.809835583053715e-05,
+ "loss": 0.8467,
+ "step": 413000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.794412220535426e-05,
+ "loss": 0.8624,
+ "step": 414000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.778969234612584e-05,
+ "loss": 0.8592,
+ "step": 415000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.763506794167208e-05,
+ "loss": 0.8656,
+ "step": 416000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.748025068294067e-05,
+ "loss": 0.8455,
+ "step": 417000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.732524226298841e-05,
+ "loss": 0.8579,
+ "step": 418000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.71700443769625e-05,
+ "loss": 0.8402,
+ "step": 419000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.701465872208216e-05,
+ "loss": 0.8468,
+ "step": 420000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.685908699762002e-05,
+ "loss": 0.8411,
+ "step": 421000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.670333090488356e-05,
+ "loss": 0.8521,
+ "step": 422000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.654739214719641e-05,
+ "loss": 0.843,
+ "step": 423000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.639127242987988e-05,
+ "loss": 0.8347,
+ "step": 424000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.623497346023418e-05,
+ "loss": 0.8391,
+ "step": 425000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.607849694751977e-05,
+ "loss": 0.8606,
+ "step": 426000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.592184460293877e-05,
+ "loss": 0.8756,
+ "step": 427000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.576501813961609e-05,
+ "loss": 0.89,
+ "step": 428000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.56080192725808e-05,
+ "loss": 0.8687,
+ "step": 429000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.545084971874738e-05,
+ "loss": 0.8715,
+ "step": 430000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.529351119689688e-05,
+ "loss": 0.8797,
+ "step": 431000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.513600542765817e-05,
+ "loss": 0.8833,
+ "step": 432000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.497833413348909e-05,
+ "loss": 0.8757,
+ "step": 433000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.48204990386577e-05,
+ "loss": 0.8763,
+ "step": 434000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.466250186922325e-05,
+ "loss": 0.8587,
+ "step": 435000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.450434435301751e-05,
+ "loss": 0.8364,
+ "step": 436000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.43460282196257e-05,
+ "loss": 0.8597,
+ "step": 437000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.418755520036775e-05,
+ "loss": 0.8276,
+ "step": 438000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.402892702827916e-05,
+ "loss": 0.844,
+ "step": 439000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.387014543809223e-05,
+ "loss": 0.8257,
+ "step": 440000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.371121216621698e-05,
+ "loss": 0.8268,
+ "step": 441000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.355212895072223e-05,
+ "loss": 0.8533,
+ "step": 442000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.339289753131649e-05,
+ "loss": 0.8188,
+ "step": 443000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.323351964932908e-05,
+ "loss": 0.8319,
+ "step": 444000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.307399704769099e-05,
+ "loss": 0.8183,
+ "step": 445000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.291433147091583e-05,
+ "loss": 0.8307,
+ "step": 446000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.275452466508077e-05,
+ "loss": 0.8195,
+ "step": 447000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.259457837780742e-05,
+ "loss": 0.8038,
+ "step": 448000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.243449435824276e-05,
+ "loss": 0.8302,
+ "step": 449000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.227427435703997e-05,
+ "loss": 0.7914,
+ "step": 450000
+ },
+ {
+ "epoch": 0.45,
+ "eval_loss": 0.4447863698005676,
+ "eval_runtime": 172.7185,
+ "eval_samples_per_second": 28.949,
+ "eval_steps_per_second": 0.457,
+ "step": 450000
  }
  ],
  "max_steps": 1000000,
  "num_train_epochs": 9223372036854775807,
- "total_flos": 2.38675090735104e+19,
+ "total_flos": 2.68509477076992e+19,
  "trial_name": null,
  "trial_params": null
  }
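
The trainer_state.json hunk above appends fifty training-log entries (steps 401000 through 450000) plus one evaluation record, and advances epoch, global_step, and total_flos accordingly. A small sketch for reading those entries back out of a local copy; the path is a placeholder, and "log_history" is the key the Hugging Face Trainer uses for this list.

```python
import json

# Placeholder path to a local copy of the committed checkpoint state.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["epoch"])  # 450000, 0.45

# Entries added by this commit are the ones past step 400000.
new_entries = [e for e in state.get("log_history", []) if e.get("step", 0) > 400000]
train_losses = [(e["step"], e["loss"]) for e in new_entries if "loss" in e]
eval_entries = [e for e in new_entries if "eval_loss" in e]

print(train_losses[-1])               # (450000, 0.7914)
print(eval_entries[-1]["eval_loss"])  # 0.4447863698005676
```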
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:37a0263f3d21f63b75bfbe41e9f1a282e52db05c95edbe95df44515b1ea2c1e2
+ oid sha256:56be5e7262b73181eb5e8e0d32e8f43f7a3e8e2c08ec528fa753ff63fb8e7b3a
  size 1740493675
runs/Feb25_19-25-50_t1v-n-15e54913-w-0/events.out.tfevents.1677353360.t1v-n-15e54913-w-0.2265434.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ccabefbfbaa0ff0cc0c430d513126b217fe287c8557827607f13fa6dd5bcd1fd
- size 12089
+ oid sha256:2581886f64da0096960caff74f2316798221f30d890453a9430c32e1de662586
+ size 20365