taufiqdp committed
Commit
1207906
1 parent: 4c51228

Upload folder using huggingface_hub

Browse files
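
The commit message above is the default one generated by the huggingface_hub client when a local folder is pushed in a single commit. A minimal sketch of how such an upload is produced, assuming a local checkpoints/ directory and a hypothetical repo id (the id is not taken from this page):

from huggingface_hub import HfApi

api = HfApi()
# Uploads every file under checkpoints/ as one commit; large binaries
# are stored on the Hub's Git LFS backend automatically.
api.upload_folder(
    folder_path="checkpoints",
    path_in_repo="checkpoints",
    repo_id="taufiqdp/nusa-albert",  # assumption: substitute the real repo id
    repo_type="model",
)
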
Files changed (38)
  1. checkpoints/checkpoint-46000/config.json +29 -0
  2. checkpoints/checkpoint-46000/model.safetensors +3 -0
  3. checkpoints/checkpoint-46000/optimizer.pt +3 -0
  4. checkpoints/checkpoint-46000/rng_state.pth +3 -0
  5. checkpoints/checkpoint-46000/scheduler.pt +3 -0
  6. checkpoints/checkpoint-46000/trainer_state.json +0 -0
  7. checkpoints/checkpoint-46000/training_args.bin +3 -0
  8. checkpoints/checkpoint-47000/config.json +29 -0
  9. checkpoints/checkpoint-47000/model.safetensors +3 -0
  10. checkpoints/checkpoint-47000/optimizer.pt +3 -0
  11. checkpoints/checkpoint-47000/rng_state.pth +3 -0
  12. checkpoints/checkpoint-47000/scheduler.pt +3 -0
  13. checkpoints/checkpoint-47000/trainer_state.json +0 -0
  14. checkpoints/checkpoint-47000/training_args.bin +3 -0
  15. checkpoints/checkpoint-48000/config.json +29 -0
  16. checkpoints/checkpoint-48000/model.safetensors +3 -0
  17. checkpoints/checkpoint-48000/optimizer.pt +3 -0
  18. checkpoints/checkpoint-48000/rng_state.pth +3 -0
  19. checkpoints/checkpoint-48000/scheduler.pt +3 -0
  20. checkpoints/checkpoint-48000/trainer_state.json +0 -0
  21. checkpoints/checkpoint-48000/training_args.bin +3 -0
  22. checkpoints/checkpoint-49000/config.json +29 -0
  23. checkpoints/checkpoint-49000/model.safetensors +3 -0
  24. checkpoints/checkpoint-49000/optimizer.pt +3 -0
  25. checkpoints/checkpoint-49000/rng_state.pth +3 -0
  26. checkpoints/checkpoint-49000/scheduler.pt +3 -0
  27. checkpoints/checkpoint-49000/trainer_state.json +0 -0
  28. checkpoints/checkpoint-49000/training_args.bin +3 -0
  29. checkpoints/checkpoint-50000/config.json +29 -0
  30. checkpoints/checkpoint-50000/model.safetensors +3 -0
  31. checkpoints/checkpoint-50000/optimizer.pt +3 -0
  32. checkpoints/checkpoint-50000/rng_state.pth +3 -0
  33. checkpoints/checkpoint-50000/scheduler.pt +3 -0
  34. checkpoints/checkpoint-50000/trainer_state.json +0 -0
  35. checkpoints/checkpoint-50000/training_args.bin +3 -0
  36. checkpoints/config.json +1 -1
  37. checkpoints/model.safetensors +1 -1
  38. checkpoints/training_args.bin +1 -1
checkpoints/checkpoint-46000/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "_name_or_path": "/content/drive/MyDrive/Colab Notebooks/nusa-albert/checkpoint-38000",
+ "architectures": [
+ "AlbertForMaskedLM"
+ ],
+ "attention_probs_dropout_prob": 0,
+ "bos_token_id": 2,
+ "classifier_dropout_prob": 0.1,
+ "embedding_size": 128,
+ "eos_token_id": 3,
+ "hidden_act": "gelu_new",
+ "hidden_dropout_prob": 0,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "inner_group_num": 1,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "albert",
+ "num_attention_heads": 12,
+ "num_hidden_groups": 1,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.38.2",
+ "type_vocab_size": 2,
+ "vocab_size": 30000
+ }
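
The config above describes a 12-layer ALBERT masked-LM: 768 hidden size with factorized 128-dimensional embeddings, 12 attention heads, a single shared hidden group, and a 30,000-piece vocabulary, resumed from checkpoint-38000. A minimal sketch of loading one of these checkpoints with transformers, assuming a local clone of the repo:

from transformers import AlbertForMaskedLM

# The path is an assumption about where the repo was cloned locally.
model = AlbertForMaskedLM.from_pretrained("checkpoints/checkpoint-46000")

# Roughly 11M float32 parameters, consistent with the 44,890,256-byte
# model.safetensors pointer below.
print(model.num_parameters())
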
checkpoints/checkpoint-46000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc93e092f7d3ac1c26c014c594473671a2245d339e1d532d8b8a46dba29de113
+ size 44890256
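
Each binary artifact is committed as a Git LFS pointer: a spec version line, the SHA-256 of the real blob, and its size in bytes. A short sketch of verifying a downloaded file against its pointer, assuming the actual weights (not just the pointer) have been fetched:

import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        # Stream in 1 MiB chunks so a large checkpoint is never fully in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "bc93e092f7d3ac1c26c014c594473671a2245d339e1d532d8b8a46dba29de113"
assert sha256_of("checkpoints/checkpoint-46000/model.safetensors") == expected
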
checkpoints/checkpoint-46000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd5566a90354ffa3ab1616bcf6bbeb4518d1a2873f1d70ebf04db474ea5f9845
+ size 89797322
checkpoints/checkpoint-46000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acf4e962749405650b6c5c38e96a5da9f0daad169a0f0d9e14ab2bb8c10a1122
+ size 14244
checkpoints/checkpoint-46000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbac8b7c94b177dc14d8b5a9f3099ffc009f1e6e57a47bec58f3b938dfd5b459
+ size 1064
checkpoints/checkpoint-46000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
checkpoints/checkpoint-46000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8859a43f8f50f6d107f5edda559ce599497f70507f4c174a92c481b462156193
+ size 5112
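
Taken together, model.safetensors, optimizer.pt, scheduler.pt, rng_state.pth, trainer_state.json, and training_args.bin form a standard transformers Trainer checkpoint, which is what allows training to resume exactly, optimizer and RNG state included. A sketch of resuming from one; the dataset and collator are placeholders, since the training data is not part of this commit:

from transformers import AlbertForMaskedLM, Trainer, TrainingArguments

# save_steps=1000 matches the 46000/47000/.../50000 cadence seen here.
# train_dataset and data_collator are placeholders for the original run's data.
args = TrainingArguments(output_dir="checkpoints", save_steps=1000)
model = AlbertForMaskedLM.from_pretrained("checkpoints/checkpoint-46000")
trainer = Trainer(model=model, args=args,
                  train_dataset=train_dataset, data_collator=data_collator)

# Restores weights, optimizer, scheduler, and RNG state from the files above.
trainer.train(resume_from_checkpoint="checkpoints/checkpoint-46000")
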
checkpoints/checkpoint-47000/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "_name_or_path": "/content/drive/MyDrive/Colab Notebooks/nusa-albert/checkpoint-38000",
+ "architectures": [
+ "AlbertForMaskedLM"
+ ],
+ "attention_probs_dropout_prob": 0,
+ "bos_token_id": 2,
+ "classifier_dropout_prob": 0.1,
+ "embedding_size": 128,
+ "eos_token_id": 3,
+ "hidden_act": "gelu_new",
+ "hidden_dropout_prob": 0,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "inner_group_num": 1,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "albert",
+ "num_attention_heads": 12,
+ "num_hidden_groups": 1,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.38.2",
+ "type_vocab_size": 2,
+ "vocab_size": 30000
+ }
checkpoints/checkpoint-47000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7815ba54f350e63c047728444f665b294f8d90649e5e7d50c3aaa327c0100063
+ size 44890256
checkpoints/checkpoint-47000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca6c807e8e127607fe98cdcb6c5d4a3e6bc599c6146b1c1573dcbbbd24d76ea0
+ size 89797322
checkpoints/checkpoint-47000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77992273b26c17a4d6fe0a8012ba8b187f2a671a39996d8a66b60098a92606af
+ size 14244
checkpoints/checkpoint-47000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40ecf57290f34ab8719257df325ca3e4f0d2fe863a99f840baf5c8af659bda05
+ size 1064
checkpoints/checkpoint-47000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
checkpoints/checkpoint-47000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8859a43f8f50f6d107f5edda559ce599497f70507f4c174a92c481b462156193
+ size 5112
checkpoints/checkpoint-48000/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "_name_or_path": "/content/drive/MyDrive/Colab Notebooks/nusa-albert/checkpoint-38000",
+ "architectures": [
+ "AlbertForMaskedLM"
+ ],
+ "attention_probs_dropout_prob": 0,
+ "bos_token_id": 2,
+ "classifier_dropout_prob": 0.1,
+ "embedding_size": 128,
+ "eos_token_id": 3,
+ "hidden_act": "gelu_new",
+ "hidden_dropout_prob": 0,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "inner_group_num": 1,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "albert",
+ "num_attention_heads": 12,
+ "num_hidden_groups": 1,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.38.2",
+ "type_vocab_size": 2,
+ "vocab_size": 30000
+ }
checkpoints/checkpoint-48000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d35a1b040e691889bb8671eac4aa65747814cbd5e65ad626868f866a308330c
+ size 44890256
checkpoints/checkpoint-48000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a44b645d09c9d7f39dfe595b18119c7dd5f28f1873d80339fb5e2f5325bfec74
+ size 89797322
checkpoints/checkpoint-48000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bc9e0945f85cd1ef6dd99350756c4647d4a867570979cec99161a214578731f
+ size 14244
checkpoints/checkpoint-48000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26fa7c9bffd4f192263263be4e8526cdf169ffd4435249a9cd313f2a0b16aedf
+ size 1064
checkpoints/checkpoint-48000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
checkpoints/checkpoint-48000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8859a43f8f50f6d107f5edda559ce599497f70507f4c174a92c481b462156193
+ size 5112
checkpoints/checkpoint-49000/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "_name_or_path": "/content/drive/MyDrive/Colab Notebooks/nusa-albert/checkpoint-38000",
+ "architectures": [
+ "AlbertForMaskedLM"
+ ],
+ "attention_probs_dropout_prob": 0,
+ "bos_token_id": 2,
+ "classifier_dropout_prob": 0.1,
+ "embedding_size": 128,
+ "eos_token_id": 3,
+ "hidden_act": "gelu_new",
+ "hidden_dropout_prob": 0,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "inner_group_num": 1,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "albert",
+ "num_attention_heads": 12,
+ "num_hidden_groups": 1,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.38.2",
+ "type_vocab_size": 2,
+ "vocab_size": 30000
+ }
checkpoints/checkpoint-49000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03a4c58092c1ce4fc55ed69db8aec74d9164c86bb16a7150364d0ffb716f4736
+ size 44890256
checkpoints/checkpoint-49000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ecb33172a265a86491e425cd0512d174dd4c566910404a50cbb5e0cc6ac6aa15
+ size 89797322
checkpoints/checkpoint-49000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a3afc4169ed7ec5713704a17ba5e013a05179f8c90eb5ef23cc523ebfd5dab7
+ size 14244
checkpoints/checkpoint-49000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d527cedeb107d2eeb189c1b06f34c22d2247092ee4a7ccffb59b38ba4b7c48b
+ size 1064
checkpoints/checkpoint-49000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
checkpoints/checkpoint-49000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8859a43f8f50f6d107f5edda559ce599497f70507f4c174a92c481b462156193
+ size 5112
checkpoints/checkpoint-50000/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "_name_or_path": "/content/drive/MyDrive/Colab Notebooks/nusa-albert/checkpoint-38000",
+ "architectures": [
+ "AlbertForMaskedLM"
+ ],
+ "attention_probs_dropout_prob": 0,
+ "bos_token_id": 2,
+ "classifier_dropout_prob": 0.1,
+ "embedding_size": 128,
+ "eos_token_id": 3,
+ "hidden_act": "gelu_new",
+ "hidden_dropout_prob": 0,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "inner_group_num": 1,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "albert",
+ "num_attention_heads": 12,
+ "num_hidden_groups": 1,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.38.2",
+ "type_vocab_size": 2,
+ "vocab_size": 30000
+ }
checkpoints/checkpoint-50000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6282ac12001508f3896fc3dfa89f981a5d570184187728288c152c09968c763b
+ size 44890256
checkpoints/checkpoint-50000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ce741a8927f411e1620972a39cb7e465a3fb8bdb9b5ac4d733faa103736c6f6
+ size 89797322
checkpoints/checkpoint-50000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cebcbeb5ff7ea9358fcc6e90a6f3366ccddc4b7148c020eb83dd9fd044397b2
+ size 14244
checkpoints/checkpoint-50000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c0b5d6c1b4dab78409ec7785b0515815223a3c9936c0fdebe4e3dddc2feccd0
+ size 1064
checkpoints/checkpoint-50000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
checkpoints/checkpoint-50000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8859a43f8f50f6d107f5edda559ce599497f70507f4c174a92c481b462156193
+ size 5112
checkpoints/config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "/content/drive/MyDrive/Colab Notebooks/nusa-albert/checkpoint-25000",
+ "_name_or_path": "/content/drive/MyDrive/Colab Notebooks/nusa-albert/checkpoint-38000",
  "architectures": [
  "AlbertForMaskedLM"
  ],
checkpoints/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:830cb24badbac18edfa59a7ad9bf307055bb91761ef656a235335d0f4bf22a10
+ oid sha256:6282ac12001508f3896fc3dfa89f981a5d570184187728288c152c09968c763b
  size 44890256
checkpoints/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2082f22c6e15e3580ffafef7d6546679ce8b0b31d9030c785ab63457d6d33de7
+ oid sha256:8859a43f8f50f6d107f5edda559ce599497f70507f4c174a92c481b462156193
  size 5112
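
Note that the updated oid for checkpoints/model.safetensors (6282ac12…) is identical to checkpoint-50000/model.safetensors above: the top-level weights were refreshed to the latest checkpoint, and the top-level training_args.bin now matches the value shared by all five checkpoints. A quick sketch to confirm two LFS-backed files are byte-identical after download, assuming a local clone of the repo:

import hashlib

def digest(path: str) -> str:
    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()

# Should hold if the root weights really are a copy of checkpoint-50000.
assert digest("checkpoints/model.safetensors") == \
       digest("checkpoints/checkpoint-50000/model.safetensors")
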