XeroCodes committed
Commit 6443fb1 · verified · 1 Parent(s): cd1ed66

Upload folder using huggingface_hub
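A commit with this message is what huggingface_hub's upload_folder produces by default. A minimal sketch of the call that likely created it — the repo id, token setup, and local folder path are placeholder assumptions, not taken from this page:

```python
# Sketch only: repo_id and folder_path are hypothetical placeholders.
from huggingface_hub import HfApi

api = HfApi()  # assumes a saved token (huggingface-cli login) or HF_TOKEN
api.upload_folder(
    folder_path="./output",          # local Trainer output dir (assumption)
    repo_id="XeroCodes/some-model",  # hypothetical repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",  # the default message
)
```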

This view is limited to 50 files because the commit contains too many changes; the raw diff holds the rest.
Files changed (50)
  1. added_tokens.json +3 -0
  2. checkpoint-1000/config.json +31 -0
  3. checkpoint-1000/generation_config.json +8 -0
  4. checkpoint-1000/model.safetensors +3 -0
  5. checkpoint-1000/optimizer.pt +3 -0
  6. checkpoint-1000/rng_state.pth +3 -0
  7. checkpoint-1000/scheduler.pt +3 -0
  8. checkpoint-1000/trainer_state.json +47 -0
  9. checkpoint-1000/training_args.bin +3 -0
  10. checkpoint-1500/config.json +31 -0
  11. checkpoint-1500/generation_config.json +8 -0
  12. checkpoint-1500/model.safetensors +3 -0
  13. checkpoint-1500/optimizer.pt +3 -0
  14. checkpoint-1500/rng_state.pth +3 -0
  15. checkpoint-1500/scheduler.pt +3 -0
  16. checkpoint-1500/trainer_state.json +54 -0
  17. checkpoint-1500/training_args.bin +3 -0
  18. checkpoint-2000/config.json +31 -0
  19. checkpoint-2000/generation_config.json +8 -0
  20. checkpoint-2000/model.safetensors +3 -0
  21. checkpoint-2000/optimizer.pt +3 -0
  22. checkpoint-2000/rng_state.pth +3 -0
  23. checkpoint-2000/scheduler.pt +3 -0
  24. checkpoint-2000/trainer_state.json +61 -0
  25. checkpoint-2000/training_args.bin +3 -0
  26. checkpoint-2500/config.json +31 -0
  27. checkpoint-2500/generation_config.json +8 -0
  28. checkpoint-2500/model.safetensors +3 -0
  29. checkpoint-2500/optimizer.pt +3 -0
  30. checkpoint-2500/rng_state.pth +3 -0
  31. checkpoint-2500/scheduler.pt +3 -0
  32. checkpoint-2500/trainer_state.json +68 -0
  33. checkpoint-2500/training_args.bin +3 -0
  34. checkpoint-3000/config.json +31 -0
  35. checkpoint-3000/generation_config.json +8 -0
  36. checkpoint-3000/model.safetensors +3 -0
  37. checkpoint-3000/optimizer.pt +3 -0
  38. checkpoint-3000/rng_state.pth +3 -0
  39. checkpoint-3000/scheduler.pt +3 -0
  40. checkpoint-3000/trainer_state.json +75 -0
  41. checkpoint-3000/training_args.bin +3 -0
  42. checkpoint-3500/config.json +31 -0
  43. checkpoint-3500/generation_config.json +8 -0
  44. checkpoint-3500/model.safetensors +3 -0
  45. checkpoint-3500/optimizer.pt +3 -0
  46. checkpoint-3500/rng_state.pth +3 -0
  47. checkpoint-3500/scheduler.pt +3 -0
  48. checkpoint-3500/trainer_state.json +82 -0
  49. checkpoint-3500/training_args.bin +3 -0
  50. checkpoint-4000/config.json +31 -0
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[PAD]": 32000
+ }
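added_tokens.json appends a single special token, [PAD], at id 32000; this is why every config.json below reports vocab_size 32001, while the config's pad_token_id stays at the base model's value of 1. A sketch of how such a token is typically added before fine-tuning, assuming the base checkpoint named in config.json:

```python
# Sketch, assuming the base checkpoint from config.json below.
from transformers import AutoModelForCausalLM, AutoTokenizer

base = "RajuKandasamy/tamillama_tiny_30m"
tok = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

added = tok.add_special_tokens({"pad_token": "[PAD]"})  # [PAD] gets id 32000
model.resize_token_embeddings(len(tok))                 # 32000 -> 32001 rows
print(added, tok.pad_token_id, len(tok))
```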
checkpoint-1000/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "RajuKandasamy/tamillama_tiny_30m",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "head_dim": 32,
+   "hidden_act": "silu",
+   "hidden_size": 256,
+   "initializer_range": 0.02,
+   "intermediate_size": 786,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 1,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32001
+ }
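The config describes a small Llama variant: 16 layers, hidden size 256, 8 attention heads of dim 32, intermediate size 786, and a 32001-entry vocabulary with untied embeddings. That works out to roughly 30.2M parameters, consistent with the 120,998,648-byte float32 model.safetensors below (≈30.25M × 4 bytes plus header). Each checkpoint directory is self-contained and loadable on its own; a sketch, assuming a local copy:

```python
# Sketch: load one checkpoint directory (local path is an assumption).
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("checkpoint-1000")
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.1f}M parameters")  # ~30.2M for this config
```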
checkpoint-1000/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "pad_token_id": 1,
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false
+ }
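generation_config.json mirrors the special-token ids from config.json and is what model.generate() picks up by default; use_cache is false here, presumably because the config was saved mid-training. A sketch of loading it and overriding a field at call time:

```python
# Sketch: inspect the saved generation defaults (path is an assumption).
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("checkpoint-1000")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 2, 3
# model.generate(**inputs, generation_config=gen_cfg, use_cache=True)
```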
checkpoint-1000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e67f5009a250f2d9c94dfc23f696043361e95e40ee9e8c3d12eef5be073b86a0
+ size 120998648
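The three-line bodies shown for the binary files are Git LFS pointer files, not the payloads themselves: a spec version, the SHA-256 of the real blob, and its size in bytes. Every .safetensors/.pt/.pth/.bin entry below follows the same pattern. A sketch for verifying a downloaded blob against the oid in its pointer:

```python
# Sketch: verify a downloaded file against its Git LFS pointer oid.
import hashlib

def sha256_of(path, chunk=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# oid from the pointer above (local path is an assumption):
expected = "e67f5009a250f2d9c94dfc23f696043361e95e40ee9e8c3d12eef5be073b86a0"
assert sha256_of("checkpoint-1000/model.safetensors") == expected
```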
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b37cdd41b861b1a00bb4fa687e537fdc5092a8cdb2f37cf1ca136cc9c1fd1eb
+ size 242089082
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea005b38dffb3ddb68fe35f651e4ee5e119bf913293dda58efdd7f56f4335656
+ size 14244
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:773c1ef53a65c444ad5d96a455011e0c8e03be3c3c98750c5a494ea1c70c2a28
+ size 1064
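optimizer.pt, scheduler.pt, and rng_state.pth carry the optimizer moments, the LR scheduler position, and the RNG state, so training can resume bit-for-bit from this step rather than restart. A sketch of the usual resume path, assuming `trainer` is a transformers.Trainer rebuilt with the original model, args, and data:

```python
# Sketch: `trainer` is an assumed transformers.Trainer configured as in
# the original run; this restores weights, optimizer, scheduler, and RNG.
trainer.train(resume_from_checkpoint="checkpoint-1000")
```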
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.49975012493753124,
+   "eval_steps": 500,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.24987506246876562,
+       "grad_norm": 1.422733187675476,
+       "learning_rate": 4.5835415625520576e-05,
+       "loss": 6.5696,
+       "step": 500
+     },
+     {
+       "epoch": 0.49975012493753124,
+       "grad_norm": 1.82273268699646,
+       "learning_rate": 4.167083125104115e-05,
+       "loss": 4.7727,
+       "step": 1000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 6003,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 135495942144000.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
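The trainer state is internally consistent: max_steps 6003 over 3 epochs means 2001 optimizer steps per epoch, so step 1000 falls at epoch 1000/2001 ≈ 0.49975, and the logged learning rates match a linear decay to zero from 5e-05 (the Trainer default), e.g. 5e-05 × (1 − 500/6003) ≈ 4.5835e-05. A sketch that replays the logged loss curve and checks the schedule:

```python
# Sketch: read log_history and compare logged LR to a linear decay
# from an assumed base LR of 5e-05 (path is an assumption).
import json

state = json.load(open("checkpoint-1000/trainer_state.json"))
for rec in state["log_history"]:
    lr_lin = 5e-05 * (1 - rec["step"] / state["max_steps"])
    print(rec["step"], rec["loss"], rec["learning_rate"], f"{lr_lin:.4e}")
```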
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb6f30cb8beaebf42035cec064257dc73ecd20aac591d533807023b0cc182733
+ size 5240
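training_args.bin is a pickled TrainingArguments object; the identical oid (eb6f30cb…) across all checkpoints confirms the run's arguments never changed. Loading it requires full unpickling, so only do this for files you trust; a sketch:

```python
# Sketch: inspect the saved TrainingArguments (path is an assumption).
# weights_only=False is needed because this is a pickled Python object.
import torch

args = torch.load("checkpoint-1000/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```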
checkpoint-1500/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "RajuKandasamy/tamillama_tiny_30m",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "head_dim": 32,
+   "hidden_act": "silu",
+   "hidden_size": 256,
+   "initializer_range": 0.02,
+   "intermediate_size": 786,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 1,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32001
+ }
checkpoint-1500/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "pad_token_id": 1,
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false
+ }
checkpoint-1500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6945aff376efd11d24c745a2179caf8fcf994122011ac4aca3461717aa4fda48
+ size 120998648
checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88acfb54df5799d88cf7bcfdecb1f741088b16c354abf1d366ab870c409941f7
+ size 242089082
checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea005b38dffb3ddb68fe35f651e4ee5e119bf913293dda58efdd7f56f4335656
+ size 14244
checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85746fcb95f2620697689a2f0a77790355e75fa81fea04251c19c0bb3ad811fa
+ size 1064
checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.7496251874062968,
+   "eval_steps": 500,
+   "global_step": 1500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.24987506246876562,
+       "grad_norm": 1.422733187675476,
+       "learning_rate": 4.5835415625520576e-05,
+       "loss": 6.5696,
+       "step": 500
+     },
+     {
+       "epoch": 0.49975012493753124,
+       "grad_norm": 1.82273268699646,
+       "learning_rate": 4.167083125104115e-05,
+       "loss": 4.7727,
+       "step": 1000
+     },
+     {
+       "epoch": 0.7496251874062968,
+       "grad_norm": 2.662141799926758,
+       "learning_rate": 3.7506246876561725e-05,
+       "loss": 4.4216,
+       "step": 1500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 6003,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 203243913216000.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb6f30cb8beaebf42035cec064257dc73ecd20aac591d533807023b0cc182733
+ size 5240
checkpoint-2000/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "RajuKandasamy/tamillama_tiny_30m",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "head_dim": 32,
+   "hidden_act": "silu",
+   "hidden_size": 256,
+   "initializer_range": 0.02,
+   "intermediate_size": 786,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 1,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32001
+ }
checkpoint-2000/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "pad_token_id": 1,
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false
+ }
checkpoint-2000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87dd617d10fbb597da567fc1fa2cb005aab33015645be8b0b8fd41c93580f68a
+ size 120998648
checkpoint-2000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cef7d6274639c009e8504bb5630231ef7d05aa6a86f5fe947500dff6f593a682
+ size 242089082
checkpoint-2000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea005b38dffb3ddb68fe35f651e4ee5e119bf913293dda58efdd7f56f4335656
+ size 14244
checkpoint-2000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89c5dd2ba40335251651e1bf6d6f55a6231bcff52aafbb100d6cf7d5961d5d89
+ size 1064
checkpoint-2000/trainer_state.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9995002498750625,
+   "eval_steps": 500,
+   "global_step": 2000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.24987506246876562,
+       "grad_norm": 1.422733187675476,
+       "learning_rate": 4.5835415625520576e-05,
+       "loss": 6.5696,
+       "step": 500
+     },
+     {
+       "epoch": 0.49975012493753124,
+       "grad_norm": 1.82273268699646,
+       "learning_rate": 4.167083125104115e-05,
+       "loss": 4.7727,
+       "step": 1000
+     },
+     {
+       "epoch": 0.7496251874062968,
+       "grad_norm": 2.662141799926758,
+       "learning_rate": 3.7506246876561725e-05,
+       "loss": 4.4216,
+       "step": 1500
+     },
+     {
+       "epoch": 0.9995002498750625,
+       "grad_norm": 2.524129629135132,
+       "learning_rate": 3.334166250208229e-05,
+       "loss": 4.178,
+       "step": 2000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 6003,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 270991884288000.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb6f30cb8beaebf42035cec064257dc73ecd20aac591d533807023b0cc182733
+ size 5240
checkpoint-2500/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "RajuKandasamy/tamillama_tiny_30m",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "head_dim": 32,
+   "hidden_act": "silu",
+   "hidden_size": 256,
+   "initializer_range": 0.02,
+   "intermediate_size": 786,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 1,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32001
+ }
checkpoint-2500/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "pad_token_id": 1,
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false
+ }
checkpoint-2500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc79216580f30ec62c5f1251131b92a510f1147518ec4253f5097c91dd3ae305
+ size 120998648
checkpoint-2500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4a98acc9b8815d9ce3ca376f17194c94f3e6820cb04e7d26996e1b8b4ced7d1
+ size 242089082
checkpoint-2500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c44141bbdc09951afac7e1120c543329e05011264f0246580d2c7b808a155c7
+ size 14244
checkpoint-2500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b0e74352a7a9430e4ccea6937b4aac21c15810086a614b63b55cdc40a702df8
+ size 1064
checkpoint-2500/trainer_state.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.249375312343828,
+   "eval_steps": 500,
+   "global_step": 2500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.24987506246876562,
+       "grad_norm": 1.422733187675476,
+       "learning_rate": 4.5835415625520576e-05,
+       "loss": 6.5696,
+       "step": 500
+     },
+     {
+       "epoch": 0.49975012493753124,
+       "grad_norm": 1.82273268699646,
+       "learning_rate": 4.167083125104115e-05,
+       "loss": 4.7727,
+       "step": 1000
+     },
+     {
+       "epoch": 0.7496251874062968,
+       "grad_norm": 2.662141799926758,
+       "learning_rate": 3.7506246876561725e-05,
+       "loss": 4.4216,
+       "step": 1500
+     },
+     {
+       "epoch": 0.9995002498750625,
+       "grad_norm": 2.524129629135132,
+       "learning_rate": 3.334166250208229e-05,
+       "loss": 4.178,
+       "step": 2000
+     },
+     {
+       "epoch": 1.249375312343828,
+       "grad_norm": 2.6071786880493164,
+       "learning_rate": 2.917707812760287e-05,
+       "loss": 3.9573,
+       "step": 2500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 6003,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 338705981374464.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-2500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb6f30cb8beaebf42035cec064257dc73ecd20aac591d533807023b0cc182733
+ size 5240
checkpoint-3000/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "RajuKandasamy/tamillama_tiny_30m",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "head_dim": 32,
+   "hidden_act": "silu",
+   "hidden_size": 256,
+   "initializer_range": 0.02,
+   "intermediate_size": 786,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 1,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32001
+ }
checkpoint-3000/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "pad_token_id": 1,
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false
+ }
checkpoint-3000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9fc92bfc7ab464809fae8ad5b692e357003356a2b0c91f3ff49b53aac6642ef6
+ size 120998648
checkpoint-3000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fd09eb388839f5c041e1e84511022068a25c95d3d6692719fd3d339b58831ad
+ size 242089082
checkpoint-3000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c44141bbdc09951afac7e1120c543329e05011264f0246580d2c7b808a155c7
+ size 14244
checkpoint-3000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e742b5da5a6a29be39d53f608c57d8a76037d5dbd851f02daab87e72860ba04d
+ size 1064
checkpoint-3000/trainer_state.json ADDED
@@ -0,0 +1,75 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.4992503748125938,
+   "eval_steps": 500,
+   "global_step": 3000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.24987506246876562,
+       "grad_norm": 1.422733187675476,
+       "learning_rate": 4.5835415625520576e-05,
+       "loss": 6.5696,
+       "step": 500
+     },
+     {
+       "epoch": 0.49975012493753124,
+       "grad_norm": 1.82273268699646,
+       "learning_rate": 4.167083125104115e-05,
+       "loss": 4.7727,
+       "step": 1000
+     },
+     {
+       "epoch": 0.7496251874062968,
+       "grad_norm": 2.662141799926758,
+       "learning_rate": 3.7506246876561725e-05,
+       "loss": 4.4216,
+       "step": 1500
+     },
+     {
+       "epoch": 0.9995002498750625,
+       "grad_norm": 2.524129629135132,
+       "learning_rate": 3.334166250208229e-05,
+       "loss": 4.178,
+       "step": 2000
+     },
+     {
+       "epoch": 1.249375312343828,
+       "grad_norm": 2.6071786880493164,
+       "learning_rate": 2.917707812760287e-05,
+       "loss": 3.9573,
+       "step": 2500
+     },
+     {
+       "epoch": 1.4992503748125938,
+       "grad_norm": 2.997709274291992,
+       "learning_rate": 2.501249375312344e-05,
+       "loss": 3.86,
+       "step": 3000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 6003,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 406453952446464.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-3000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb6f30cb8beaebf42035cec064257dc73ecd20aac591d533807023b0cc182733
+ size 5240
checkpoint-3500/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "RajuKandasamy/tamillama_tiny_30m",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "head_dim": 32,
+   "hidden_act": "silu",
+   "hidden_size": 256,
+   "initializer_range": 0.02,
+   "intermediate_size": 786,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 1,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32001
+ }
checkpoint-3500/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "pad_token_id": 1,
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false
+ }
checkpoint-3500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92b7df3af6edc0577c303aa3e79aeb5a9a870b43d0fafee0423a1f03d4763029
+ size 120998648
checkpoint-3500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af1ceed33965ab3cd717b9b8758e04c5b1b559482a8a1b94d2195c245136581e
+ size 242089082
checkpoint-3500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c44141bbdc09951afac7e1120c543329e05011264f0246580d2c7b808a155c7
+ size 14244
checkpoint-3500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d98a12675e85ec1c1e84206a6eee6abc4af6262538de4752065ab3f52b793c21
+ size 1064
checkpoint-3500/trainer_state.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.7491254372813594,
+   "eval_steps": 500,
+   "global_step": 3500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.24987506246876562,
+       "grad_norm": 1.422733187675476,
+       "learning_rate": 4.5835415625520576e-05,
+       "loss": 6.5696,
+       "step": 500
+     },
+     {
+       "epoch": 0.49975012493753124,
+       "grad_norm": 1.82273268699646,
+       "learning_rate": 4.167083125104115e-05,
+       "loss": 4.7727,
+       "step": 1000
+     },
+     {
+       "epoch": 0.7496251874062968,
+       "grad_norm": 2.662141799926758,
+       "learning_rate": 3.7506246876561725e-05,
+       "loss": 4.4216,
+       "step": 1500
+     },
+     {
+       "epoch": 0.9995002498750625,
+       "grad_norm": 2.524129629135132,
+       "learning_rate": 3.334166250208229e-05,
+       "loss": 4.178,
+       "step": 2000
+     },
+     {
+       "epoch": 1.249375312343828,
+       "grad_norm": 2.6071786880493164,
+       "learning_rate": 2.917707812760287e-05,
+       "loss": 3.9573,
+       "step": 2500
+     },
+     {
+       "epoch": 1.4992503748125938,
+       "grad_norm": 2.997709274291992,
+       "learning_rate": 2.501249375312344e-05,
+       "loss": 3.86,
+       "step": 3000
+     },
+     {
+       "epoch": 1.7491254372813594,
+       "grad_norm": 2.626232862472534,
+       "learning_rate": 2.084790937864401e-05,
+       "loss": 3.7562,
+       "step": 3500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 6003,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 474201923518464.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-3500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb6f30cb8beaebf42035cec064257dc73ecd20aac591d533807023b0cc182733
+ size 5240
checkpoint-4000/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "RajuKandasamy/tamillama_tiny_30m",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "head_dim": 32,
+   "hidden_act": "silu",
+   "hidden_size": 256,
+   "initializer_range": 0.02,
+   "intermediate_size": 786,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 1,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32001
+ }
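The 50-file limit cuts the view off inside checkpoint-4000, so later files in this commit are not shown. One way to enumerate everything at this revision is to list the repo tree; a sketch with a placeholder repo id:

```python
# Sketch: list all files at this commit, beyond the 50-file view limit.
# The repo id is a hypothetical placeholder; revision is the commit hash.
from huggingface_hub import list_repo_files

files = list_repo_files("XeroCodes/some-model", revision="6443fb1")
print(len(files))
```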