Rardilit committed on
Commit 49902f1
1 Parent(s): d178e57

Training Files Initial Commit

Files changed (8)
  1. config.json +23 -0
  2. optimizer.pt +3 -0
  3. pytorch_model.bin +3 -0
  4. rng_state.pth +3 -0
  5. scaler.pt +3 -0
  6. scheduler.pt +3 -0
  7. trainer_state.json +80 -0
  8. training_args.bin +3 -0
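These eight files are the standard artifacts the Hugging Face `Trainer` writes into a checkpoint directory: the model weights (`pytorch_model.bin`), optimizer, LR-scheduler and AMP grad-scaler state, the RNG state, the serialized training arguments, and the trainer's bookkeeping JSON. A minimal sketch of how such a checkpoint is located when resuming a run (the `/content/Model` path is the output directory named in `trainer_state.json` and is used here purely for illustration):

```python
from transformers.trainer_utils import get_last_checkpoint

# Locate the newest checkpoint-<step> directory under the training output dir.
# This is the same helper Trainer uses when resume_from_checkpoint=True.
last_ckpt = get_last_checkpoint("/content/Model")
print(last_ckpt)  # e.g. "/content/Model/checkpoint-8"

# A Trainer configured with the original model and datasets would then restore
# weights, optimizer, scheduler, scaler and RNG state from these files via:
# trainer.train(resume_from_checkpoint=last_ckpt)
```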
config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "architectures": [
+     "LLaMAForCausalLM"
+   ],
+   "bos_token_id": 0,
+   "eos_token_id": 1,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "max_position_embeddings": 2048,
+   "max_sequence_length": 2048,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "pad_token_id": -1,
+   "rms_norm_eps": 1e-06,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.30.0.dev0",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
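This configuration describes a 7B-parameter LLaMA model: 32 decoder layers, 32 attention heads, hidden size 4096, intermediate size 11008, and a 32,000-token vocabulary. Note that the `architectures` entry uses the early "LLaMAForCausalLM" capitalization; released `transformers` versions name the class `LlamaForCausalLM`. A minimal sketch of loading the config with `transformers` (the local `config.json` path is assumed to point at a checkout of this repository):

```python
from transformers import LlamaConfig

# Build the config object directly from the committed JSON file.
config = LlamaConfig.from_json_file("config.json")
print(config.model_type, config.hidden_size, config.num_hidden_layers)
# -> llama 4096 32

# Instantiating LlamaForCausalLM(config) from here would allocate the full
# ~7B-parameter network with randomly initialized weights.
```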
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ad5add92ee7ee82990a7f172c269e8196b546ad9a93ba55ef80f9ef4a62387f
+ size 33661637
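The binary checkpoint files are tracked with Git LFS, so the diff records only a pointer: the spec version, the SHA-256 of the real content (`oid`), and its size in bytes, here roughly 34 MB of optimizer state. A small sketch of checking a downloaded file against its pointer (the `lfs_sha256` helper name is illustrative):

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """SHA-256 digest of a file, computed in chunks; matches the LFS pointer's oid."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# After `git lfs pull`, the materialized file should hash to the pointer's oid:
# lfs_sha256("optimizer.pt") == "5ad5add92ee7ee82990a7f172c269e8196b546ad9a93ba55ef80f9ef4a62387f"
```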
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:310d00ad54467c1b7503ed8f698a03db8bba710cfd20c331d1008c263be55ac8
+ size 16822989
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d049cafb18b7f47a234fcf3866a4c2c804a164a31e8971d4e7c32238f9d7371
+ size 14575
scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a79944e2dfd765a29c7ec78561cfb25a4e79d7bdb4c76c3a8717188592da7b04
+ size 557
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ecb7f65706cd916292a57e300dc43e4958eda7c3173cd4257f810ab135d1349
+ size 627
trainer_state.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "best_metric": 2.451780080795288,
+   "best_model_checkpoint": "/content/Model/checkpoint-8",
+   "epoch": 0.12,
+   "global_step": 8,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.01,
+       "learning_rate": 2.9999999999999997e-05,
+       "loss": 2.7323,
+       "step": 1
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 5.9999999999999995e-05,
+       "loss": 2.7914,
+       "step": 2
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 8.999999999999999e-05,
+       "loss": 2.7974,
+       "step": 3
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 0.00011999999999999999,
+       "loss": 2.7408,
+       "step": 4
+     },
+     {
+       "epoch": 0.06,
+       "eval_loss": 2.6886792182922363,
+       "eval_runtime": 275.0654,
+       "eval_samples_per_second": 3.912,
+       "eval_steps_per_second": 0.491,
+       "step": 4
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.00015,
+       "loss": 2.6641,
+       "step": 5
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 0.00017999999999999998,
+       "loss": 2.604,
+       "step": 6
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 0.00020999999999999998,
+       "loss": 2.5921,
+       "step": 7
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 0.00023999999999999998,
+       "loss": 2.5239,
+       "step": 8
+     },
+     {
+       "epoch": 0.12,
+       "eval_loss": 2.451780080795288,
+       "eval_runtime": 274.9467,
+       "eval_samples_per_second": 3.913,
+       "eval_steps_per_second": 0.491,
+       "step": 8
+     }
+   ],
+   "max_steps": 10,
+   "num_train_epochs": 1,
+   "total_flos": 1.0631008082460672e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
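`trainer_state.json` is the Trainer's bookkeeping file: it records the per-step training loss and learning rate, the periodic evaluation metrics, and the best checkpoint seen so far (eval loss 2.4518 at step 8 of a 10-step run). A short sketch of pulling the loss curve out of `log_history`:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>2}  lr={entry['learning_rate']:.2e}  loss={entry['loss']:.4f}")
    elif "eval_loss" in entry:
        print(f"step {entry['step']:>2}  eval_loss={entry['eval_loss']:.4f}")

print("best:", state["best_metric"], "from", state["best_model_checkpoint"])
```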
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:279014beb727af8f676d08c7607ed2bcce578a742f1dab4d834ec6848d07c9f8
+ size 3899
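`training_args.bin` is the pickled `transformers.TrainingArguments` object for the run. It can be inspected with `torch.load`, provided a compatible `transformers` version is importable when unpickling (on PyTorch 2.6+ the `weights_only=False` flag must be passed explicitly):

```python
import torch

# Unpickle the saved TrainingArguments; this executes pickle, so only do it
# for checkpoints you trust.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.max_steps)
```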