anjandash committed
Commit af38e1d
1 Parent(s): d161107

added model files

.gitattributes CHANGED
@@ -25,3 +25,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ training_args.bin filter=lfs diff=lfs merge=lfs -text
+ tf_model.h5 filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,9 @@
  ---
+
+ language:
+ - java
  license: mit
- ---
+ datasets:
+ - anjandash/java-8m-methods-v1
+
+ ---
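The new README metadata tags the model as Java-language and links it to the anjandash/java-8m-methods-v1 dataset. As a rough sketch of pulling that dataset with the standard datasets API (split names and column layout are assumptions, not stated in this commit):

    from datasets import load_dataset

    # "anjandash/java-8m-methods-v1" comes from the README metadata above;
    # the available splits/columns are assumptions until inspected.
    ds = load_dataset("anjandash/java-8m-methods-v1")
    print(ds)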
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "/Users/anjandash/Desktop/HF_MODELS/JavaBERT-small",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 2,
+   "classifier_dropout": null,
+   "eos_token_id": 3,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.15.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 64000
+ }
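The config describes a 24-layer BERT encoder (hidden size 768, 12 attention heads, 64,000-token vocabulary) wrapped in a BertForSequenceClassification head. A minimal loading sketch, assuming the files in this commit are published under a Hub id like anjandash/JavaBERT-small (the id is an assumption inferred from the local path, not stated in config.json):

    from transformers import AutoConfig, AutoModelForSequenceClassification

    # "anjandash/JavaBERT-small" is an assumed repo id for this commit's files.
    model_id = "anjandash/JavaBERT-small"
    config = AutoConfig.from_pretrained(model_id)  # reads the config.json above
    model = AutoModelForSequenceClassification.from_pretrained(model_id)
    print(config.num_hidden_layers, config.hidden_size, config.vocab_size)  # 24 768 64000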
convert_to_TF.py ADDED
@@ -0,0 +1,5 @@
+ from transformers import TFAutoModelForSequenceClassification
+
+ checkpoint_path = "/Users/anjandash/Desktop/HF_MODELS/JavaBERT-small"
+ tf_model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint_path, from_pt=True)
+ tf_model.save_pretrained(checkpoint_path)
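convert_to_TF.py loads the PyTorch checkpoint with from_pt=True and re-saves it in the same directory, which is what produces the tf_model.h5 added further down. Once converted, the TensorFlow weights load directly; a sketch assuming the same local checkpoint path as the script:

    from transformers import TFAutoModelForSequenceClassification

    # Loads the tf_model.h5 written by convert_to_TF.py (path assumed identical).
    checkpoint_path = "/Users/anjandash/Desktop/HF_MODELS/JavaBERT-small"
    tf_model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint_path)
    tf_model.summary()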
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8591a113fe3ee50b45784491ad89be36cbb7d890bc169cf12016df2f6ece395
+ size 881415723
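pytorch_model.bin is committed as a Git LFS pointer: only the spec version, the sha256 object id, and the blob size (roughly 881 MB) live in the repository, while the weight blob itself is fetched through LFS. A sketch of downloading the resolved file with huggingface_hub (the repo id is an assumption):

    from huggingface_hub import hf_hub_download

    # Fetches the LFS-backed weights; "anjandash/JavaBERT-small" is an assumed
    # repo id, not something the pointer file records.
    local_path = hf_hub_download(repo_id="anjandash/JavaBERT-small",
                                 filename="pytorch_model.bin")
    print(local_path)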
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3fdc85a7ec7cc911fbfeff8dd59150740406934026703275230b6de44e3185e
+ size 15523
scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e7c32d009e082d64df2d932bfa47a085fa94a846f8738f93ddbbcf35c4de54d
+ size 559
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00f19f3443fa68df47a4d4adad8bf0004a93170b3b8916e34a300491882a5702
+ size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6934f86d1df4265b1c9c7d752e1d0dfc5da506aec6194097a3a71f16c7b0972
+ size 881549424
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "/home/akarmakar/Project2/tokenizer/jemma-java-bert-tokenizer", "tokenizer_class": "BertTokenizer"}
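tokenizer_config.json declares a lower-casing BertTokenizer with model_max_length 512 and the [CLS]/[SEP]/[PAD]/[UNK]/[MASK] special tokens listed in special_tokens_map.json. A loading sketch, again assuming a Hub id of anjandash/JavaBERT-small:

    from transformers import AutoTokenizer

    # AutoTokenizer reads tokenizer.json / vocab.txt plus the two JSON files above.
    tokenizer = AutoTokenizer.from_pretrained("anjandash/JavaBERT-small")
    enc = tokenizer("public static void main(String[] args) {}",
                    truncation=True, max_length=512)
    print(enc["input_ids"][:10])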
trainer_state.json ADDED
@@ -0,0 +1,142 @@
+ {
+   "best_metric": 6.430952072143555,
+   "best_model_checkpoint": "finetuned-bert__java-8m-methods__jemma-java-bert-tokenizer__XXXM/checkpoint-45000",
+   "epoch": 2.626185404569461,
+   "global_step": 45000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.29,
+       "learning_rate": 0.0004998919902242132,
+       "loss": 5.8968,
+       "step": 5000
+     },
+     {
+       "epoch": 0.29,
+       "eval_loss": 6.462705612182617,
+       "eval_runtime": 1805.2005,
+       "eval_samples_per_second": 27.514,
+       "eval_steps_per_second": 6.879,
+       "step": 5000
+     },
+     {
+       "epoch": 0.58,
+       "learning_rate": 0.0004994534515508689,
+       "loss": 5.9119,
+       "step": 10000
+     },
+     {
+       "epoch": 0.58,
+       "eval_loss": 6.476224422454834,
+       "eval_runtime": 1804.4274,
+       "eval_samples_per_second": 27.526,
+       "eval_steps_per_second": 6.882,
+       "step": 10000
+     },
+     {
+       "epoch": 0.88,
+       "learning_rate": 0.0004986784236353977,
+       "loss": 5.9089,
+       "step": 15000
+     },
+     {
+       "epoch": 0.88,
+       "eval_loss": 6.472481727600098,
+       "eval_runtime": 1804.1115,
+       "eval_samples_per_second": 27.531,
+       "eval_steps_per_second": 6.883,
+       "step": 15000
+     },
+     {
+       "epoch": 1.17,
+       "learning_rate": 0.0004975676423415004,
+       "loss": 5.9054,
+       "step": 20000
+     },
+     {
+       "epoch": 1.17,
+       "eval_loss": 6.4574785232543945,
+       "eval_runtime": 1804.4293,
+       "eval_samples_per_second": 27.526,
+       "eval_steps_per_second": 6.882,
+       "step": 20000
+     },
+     {
+       "epoch": 1.46,
+       "learning_rate": 0.0004961227282936757,
+       "loss": 5.9018,
+       "step": 25000
+     },
+     {
+       "epoch": 1.46,
+       "eval_loss": 6.441328525543213,
+       "eval_runtime": 1805.0751,
+       "eval_samples_per_second": 27.516,
+       "eval_steps_per_second": 6.879,
+       "step": 25000
+     },
+     {
+       "epoch": 1.75,
+       "learning_rate": 0.0004943460202721656,
+       "loss": 5.9057,
+       "step": 30000
+     },
+     {
+       "epoch": 1.75,
+       "eval_loss": 6.43818998336792,
+       "eval_runtime": 1791.0913,
+       "eval_samples_per_second": 27.731,
+       "eval_steps_per_second": 6.933,
+       "step": 30000
+     },
+     {
+       "epoch": 2.04,
+       "learning_rate": 0.0004922392052032267,
+       "loss": 5.9076,
+       "step": 35000
+     },
+     {
+       "epoch": 2.04,
+       "eval_loss": 6.455899238586426,
+       "eval_runtime": 1805.3008,
+       "eval_samples_per_second": 27.513,
+       "eval_steps_per_second": 6.879,
+       "step": 35000
+     },
+     {
+       "epoch": 2.33,
+       "learning_rate": 0.0004898059684069413,
+       "loss": 5.8996,
+       "step": 40000
+     },
+     {
+       "epoch": 2.33,
+       "eval_loss": 6.460456848144531,
+       "eval_runtime": 1805.5197,
+       "eval_samples_per_second": 27.51,
+       "eval_steps_per_second": 6.878,
+       "step": 40000
+     },
+     {
+       "epoch": 2.63,
+       "learning_rate": 0.00048704920398382135,
+       "loss": 5.9083,
+       "step": 45000
+     },
+     {
+       "epoch": 2.63,
+       "eval_loss": 6.430952072143555,
+       "eval_runtime": 1807.4508,
+       "eval_samples_per_second": 27.48,
+       "eval_steps_per_second": 6.87,
+       "step": 45000
+     }
+   ],
+   "max_steps": 428375,
+   "num_train_epochs": 25,
+   "total_flos": 7.5543354473762e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
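trainer_state.json logs training loss and eval_loss every 5,000 steps; the lowest eval_loss (6.4310 at step 45000) is what best_model_checkpoint points to, with training capped at 428,375 steps over 25 epochs. A small sketch for inspecting that history from a local copy of the file (path assumed):

    import json

    # Read the trainer_state.json added in this commit (local copy assumed).
    with open("trainer_state.json") as f:
        state = json.load(f)

    # Keep only the evaluation entries and pair each step with its eval_loss.
    evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
    for step, loss in evals:
        print(step, loss)

    print("best:", state["best_metric"], "at", state["best_model_checkpoint"])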
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20995a35255e755147fe73406a5b5e0d9face3602f8680abb97f54e0f4ea86b1
+ size 3119
vocab.txt ADDED
The diff for this file is too large to render. See raw diff