Harish Tayyar Madabushi committed on
Commit 50d1635
1 Parent(s): 9980210

added model

config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_name_or_path": "distilbert-base-cased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.10.0.dev0",
+   "vocab_size": 28996
+ }
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71445761097e707dddcbc105d01c55f84fa4a2a4fc366ae878dec7192911ad2a
+ size 526327977
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d28dd79ecb0adf125e697b21e5e83837251bd08054fc295d80e6b920b0eee3d0
+ size 263174287
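The .pt/.pth/.bin entries in this commit are Git LFS pointer files: the three lines record the pointer spec version, the SHA-256 of the real object, and its size in bytes, while the binary itself is fetched with git lfs pull. A small verification sketch (not part of the repo) that checks a downloaded object against its pointer:

# Verify a pulled LFS object against the oid/size in its pointer file.
import hashlib
from pathlib import Path

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected_oid = "d28dd79ecb0adf125e697b21e5e83837251bd08054fc295d80e6b920b0eee3d0"
expected_size = 263174287

path = Path("pytorch_model.bin")
assert path.stat().st_size == expected_size, "size mismatch"
assert sha256_of(str(path)) == expected_oid, "sha256 mismatch"
print("pytorch_model.bin matches its LFS pointer")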
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d1b379e3f479f666b3735554652330d68800c321c8235592ba6750ea30a479a
+ size 14649
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a30656fcdfc4d0c89d1f2b76eff1eaadef807c69e180832bb3fe2adea1df336
+ size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilbert-base-cased", "tokenizer_class": "DistilBertTokenizer"}
trainer_state.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "best_metric": 0.7565779291131405,
+   "best_model_checkpoint": "output-no-git/EN-OUTPUT/distilbert-base-cased/FalseTrue-0/2/checkpoint-520",
+   "epoch": 5.0,
+   "global_step": 520,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.7446351647377014,
+       "eval_f1": 0.7378445712448767,
+       "eval_f1_binary": 0.7800369685767097,
+       "eval_f1_weighted": 0.7470798170127886,
+       "eval_loss": 0.7295559048652649,
+       "eval_runtime": 0.9453,
+       "eval_samples_per_second": 492.97,
+       "eval_steps_per_second": 62.415,
+       "step": 104
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.7424892783164978,
+       "eval_f1": 0.7401728463897408,
+       "eval_f1_binary": 0.7647058823529411,
+       "eval_f1_weighted": 0.7455427383816858,
+       "eval_loss": 0.9534124732017517,
+       "eval_runtime": 0.7364,
+       "eval_samples_per_second": 632.823,
+       "eval_steps_per_second": 80.121,
+       "step": 208
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.7618025541305542,
+       "eval_f1": 0.7543816329922364,
+       "eval_f1_binary": 0.7970749542961609,
+       "eval_f1_weighted": 0.7637265230630523,
+       "eval_loss": 1.0867007970809937,
+       "eval_runtime": 0.736,
+       "eval_samples_per_second": 633.169,
+       "eval_steps_per_second": 80.165,
+       "step": 312
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.7532188892364502,
+       "eval_f1": 0.7470105321795617,
+       "eval_f1_binary": 0.7866419294990724,
+       "eval_f1_weighted": 0.7556852157130167,
+       "eval_loss": 1.2413438558578491,
+       "eval_runtime": 0.7367,
+       "eval_samples_per_second": 632.508,
+       "eval_steps_per_second": 80.082,
+       "step": 416
+     },
+     {
+       "epoch": 4.81,
+       "learning_rate": 9.316239316239318e-06,
+       "loss": 0.1347,
+       "step": 500
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.7682403326034546,
+       "eval_f1": 0.7565779291131405,
+       "eval_f1_binary": 0.8098591549295775,
+       "eval_f1_weighted": 0.7682403433476395,
+       "eval_loss": 1.2424249649047852,
+       "eval_runtime": 0.7359,
+       "eval_samples_per_second": 633.202,
+       "eval_steps_per_second": 80.169,
+       "step": 520
+     }
+   ],
+   "max_steps": 936,
+   "num_train_epochs": 9,
+   "total_flos": 550898794160640.0,
+   "trial_name": null,
+   "trial_params": null
+ }
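trainer_state.json records the per-epoch evaluation history: best_metric (0.7566) is the eval_f1 reached at epoch 5 / step 520, which is why best_model_checkpoint points at checkpoint-520, even though the run was configured for 9 epochs (936 max steps). A short sketch of reading the state back to confirm this, assuming the file sits in the working directory:

# Recover the evaluation history and the step that produced best_metric.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_f1" in e]
best = max(evals, key=lambda e: e["eval_f1"])
print(f"best eval_f1 = {best['eval_f1']:.4f} at epoch {best['epoch']} (step {best['step']})")
print("matches best_metric:", best["eval_f1"] == state["best_metric"])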
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b32828f31b46ce6b06ec54c7895e01003dafbca56a8af9b867f2b06c70908f98
+ size 2735
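training_args.bin is the TrainingArguments object the Trainer pickled alongside the checkpoint. A sketch (assumed usage, not part of the commit) of inspecting it; transformers must be installed so the class can be unpickled:

# Inspect the saved training arguments.
import torch

# weights_only=False is required on newer PyTorch where safe loading is the default;
# drop the kwarg on PyTorch versions older than 1.13.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)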
vocab.txt ADDED
The diff for this file is too large to render. See raw diff