husnu committed on
Commit
6b92c13
1 Parent(s): f792021

Training in progress, step 500

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
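This single rule keeps the numbered checkpoint-*/ directories that the Trainer writes during training out of version control; the current state is committed explicitly under last-checkpoint/ instead.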
config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "_name_or_path": "husnu/bert-base-turkish-128k-cased-finetuned_lr-2e-05_epochs-3",
+ "architectures": [
+ "BertForQuestionAnswering"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.15.0",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 128000
+ }
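The config describes a standard 12-layer BERT-base encoder with a 128,000-token vocabulary and an extractive question-answering head. Below is a minimal loading sketch, not part of the commit; the model id is copied from "_name_or_path" above and is an assumption about where these files are published.

from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

# Assumed repo id, taken from "_name_or_path" in config.json; substitute this
# repository's own id if it differs.
model_id = "husnu/bert-base-turkish-128k-cased-finetuned_lr-2e-05_epochs-3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForQuestionAnswering.from_pretrained(model_id)  # BertForQuestionAnswering per "architectures"

# Illustrative usage; the question and context are examples, not from the commit.
qa = pipeline("question-answering", model=model, tokenizer=tokenizer)
print(qa(question="Türkiye'nin başkenti neresidir?", context="Ankara Türkiye'nin başkentidir."))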
last-checkpoint/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "_name_or_path": "husnu/bert-base-turkish-128k-cased-finetuned_lr-2e-05_epochs-3",
+ "architectures": [
+ "BertForQuestionAnswering"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.15.0",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 128000
+ }
last-checkpoint/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39fcc8c39b811b51925c50d44d86f10581c7b5c8a773963e39b2511004afdc08
+ size 1470165733
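Every binary in this commit is stored as a Git LFS pointer like the one above; only the SHA-256 oid and byte size are kept in git. A small verification sketch, assuming the actual file has been pulled locally:

import hashlib

def matches_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    # Recompute the digest and size recorded in the LFS pointer, streaming in
    # 1 MiB chunks since these files reach ~1.4 GB.
    digest, size = hashlib.sha256(), 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return size == expected_size and digest.hexdigest() == expected_oid

# oid and size copied from the pointer above.
print(matches_pointer(
    "last-checkpoint/optimizer.pt",
    "39fcc8c39b811b51925c50d44d86f10581c7b5c8a773963e39b2511004afdc08",
    1470165733,
))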
last-checkpoint/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55721cdc672bcdb58f63624f4327d65e4e31bfe9d3ff0543f0e35ba9160303a4
+ size 735108529
last-checkpoint/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea243a68bf7e14720486d113eeaa790fb5610e6149ee0c92efb6653782e7dcb2
+ size 14503
last-checkpoint/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77f48b2a9603b95bdc2ef20786d0135a198e10c3b345ef20ff0dfe4037461fcd
+ size 623
last-checkpoint/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
last-checkpoint/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
last-checkpoint/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "max_len": 512, "special_tokens_map_file": null, "name_or_path": "husnu/bert-base-turkish-128k-cased-finetuned_lr-2e-05_epochs-3", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
last-checkpoint/trainer_state.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.22271714922049,
+ "global_step": 500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.5545657015590202e-05,
+ "loss": 0.7221,
+ "step": 500
+ }
+ ],
+ "max_steps": 2245,
+ "num_train_epochs": 1,
+ "total_flos": 783890270208000.0,
+ "trial_name": null,
+ "trial_params": null
+ }
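The logged numbers are internally consistent with a one-epoch run under a linear learning-rate decay from the 2e-05 base rate named in the model id; a quick check reproduces them (the no-warmup linear schedule is an assumption):

# epoch is the fraction of the 2245 total steps completed at step 500.
base_lr, step, max_steps = 2e-05, 500, 2245
print(step / max_steps)                            # 0.22271714922049, the "epoch" above
print(base_lr * ((max_steps - step) / max_steps))  # 1.5545657015590202e-05, the "learning_rate" above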
last-checkpoint/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26a33438b6273725c7042b92ed130d29a7dbb79d0bc509dcc819c18058387eb9
+ size 3183
last-checkpoint/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
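Together, the last-checkpoint/ files form a complete Trainer checkpoint: model weights, optimizer and scheduler state, RNG state, tokenizer, and trainer_state.json. A resume sketch, assuming a Trainer rebuilt with the original run's arguments and dataset (both are placeholders here, not recoverable from the commit):

from transformers import AutoModelForQuestionAnswering, Trainer, TrainingArguments

model = AutoModelForQuestionAnswering.from_pretrained("last-checkpoint")
args = TrainingArguments(output_dir="out")  # placeholder; the real settings live in training_args.bin
trainer = Trainer(model=model, args=args, train_dataset=train_dataset)  # train_dataset is a placeholder
trainer.train(resume_from_checkpoint="last-checkpoint")  # restores optimizer, scheduler, and RNG state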
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55721cdc672bcdb58f63624f4327d65e4e31bfe9d3ff0543f0e35ba9160303a4
+ size 735108529
runs/Jan15_19-13-15_7515370a3f04/1642274015.386008/events.out.tfevents.1642274015.7515370a3f04.73.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db1ae51f3677f9aa3ad9263dafef201db2ae42eec08a7db334f3742eb01eb60d
+ size 4979
runs/Jan15_19-13-15_7515370a3f04/events.out.tfevents.1642274015.7515370a3f04.73.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a27aa7b82ec67a1220bee80d7687be4fadf738ed27465d76f00d5e1ee3ab11b5
+ size 3727
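The runs/ entries are TensorBoard event logs; the file in the timestamped subdirectory likely holds the logged hyperparameters. A read-back sketch, assuming the tensorboard package and a local copy of runs/ (the scalar tag names are an assumption):

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Jan15_19-13-15_7515370a3f04")
acc.Reload()
for tag in acc.Tags()["scalars"]:
    for ev in acc.Scalars(tag):
        print(tag, ev.step, ev.value)  # expect a loss of 0.7221 at step 500, per trainer_state.json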
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "max_len": 512, "special_tokens_map_file": null, "name_or_path": "husnu/bert-base-turkish-128k-cased-finetuned_lr-2e-05_epochs-3", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26a33438b6273725c7042b92ed130d29a7dbb79d0bc509dcc819c18058387eb9
+ size 3183
vocab.txt ADDED
The diff for this file is too large to render. See raw diff