Hojjat committed on
Commit 534355c
1 Parent(s): 4e44411

Training in progress, step 500

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 64,
+   "initializer_range": 0.02,
+   "intermediate_size": 256,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 4,
+   "num_hidden_layers": 4,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.27.3",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 26
+ }
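
This config defines a deliberately tiny BERT: 4 layers, 4 attention heads, hidden size 64, and a 26-token vocabulary (matching the word-level tokenizer added below). A minimal sketch of rebuilding the same architecture with transformers — values are copied from config.json; instantiating from scratch like this yields random weights, not this trained checkpoint:

```python
from transformers import BertConfig, BertForMaskedLM

# Non-default values copied from the config.json committed above;
# everything else (gelu, 0.1 dropouts, etc.) matches BertConfig defaults.
config = BertConfig(
    vocab_size=26,
    hidden_size=64,
    num_hidden_layers=4,
    num_attention_heads=4,
    intermediate_size=256,
    max_position_embeddings=512,
    pad_token_id=0,
)
model = BertForMaskedLM(config)

# Roughly 240k parameters, consistent with the ~985 KB float32
# pytorch_model.bin below (985042 bytes / 4 bytes per weight).
print(sum(p.numel() for p in model.parameters()))
```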
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:489e3f41e6d840f33e49b8bc28cf02be7345ac586229f37beda557954c4aca18
+ size 985042
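
The three lines above are a Git LFS pointer, not the weights themselves: the repo tracks only the blob's SHA-256 and size (~985 KB), and LFS fetches the binary at checkout. A hedged sketch of pulling and loading the real file with huggingface_hub — "Hojjat/model-repo" is a hypothetical repo id, since the actual one is not visible in this commit view:

```python
import torch
from huggingface_hub import hf_hub_download

# NOTE: "Hojjat/model-repo" is a placeholder; the real repo id
# is not shown in this diff.
path = hf_hub_download(repo_id="Hojjat/model-repo", filename="pytorch_model.bin")
state_dict = torch.load(path, map_location="cpu")
print(f"{len(state_dict)} tensors")
```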
runs/Apr04_14-12-18_15aeff423f3a/1680617609.3427448/events.out.tfevents.1680617609.15aeff423f3a.31229.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8f5e4002545bc9c7ffc7a26ff71d59a1e8d858bd0b07e5e52c0aea437640285
+ size 5829
runs/Apr04_14-12-18_15aeff423f3a/1680617907.0042508/events.out.tfevents.1680617907.15aeff423f3a.31229.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:834984940c5c9aaecd6ce8b582991a5855a3e3d3f2351c748bbc1cab6b33b3c4
+ size 5829
runs/Apr04_14-12-18_15aeff423f3a/events.out.tfevents.1680617593.15aeff423f3a.31229.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf997a2a3b594e701095037a16f5771c54a612d09473d531c7198ec6138609ca
+ size 5684
runs/Apr04_14-12-18_15aeff423f3a/events.out.tfevents.1680617904.15aeff423f3a.31229.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfb3fa43e2a755506f86bb4293e346a399d77b912b8b5c9bfce66c1a462c1d03
+ size 5684
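
The four runs/ files are TensorBoard event logs written by the Trainer; the two timestamped subdirectories appear to hold the hyperparameter dumps logged alongside the scalar curves. A sketch of reading them programmatically — the scalar tag names are assumptions, since the log contents are stored in LFS and not visible here:

```python
from tensorboard.backend.event_processing import event_accumulator

# Path copied from this commit; point at the run directory.
ea = event_accumulator.EventAccumulator("runs/Apr04_14-12-18_15aeff423f3a")
ea.Reload()

print(ea.Tags()["scalars"])          # list the available scalar tags
for ev in ea.Scalars("train/loss"):  # "train/loss" is an assumed tag name
    print(ev.step, ev.value)
```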
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
@@ -0,0 +1,177 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "special": true,
+       "content": "[UNK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false
+     },
+     {
+       "id": 1,
+       "special": true,
+       "content": "[CLS]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false
+     },
+     {
+       "id": 2,
+       "special": true,
+       "content": "[SEP]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false
+     },
+     {
+       "id": 3,
+       "special": true,
+       "content": "[PAD]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false
+     },
+     {
+       "id": 4,
+       "special": true,
+       "content": "[MASK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false
+     }
+   ],
+   "normalizer": {
+     "type": "Sequence",
+     "normalizers": [
+       {
+         "type": "NFD"
+       },
+       {
+         "type": "Lowercase"
+       },
+       {
+         "type": "StripAccents"
+       }
+     ]
+   },
+   "pre_tokenizer": {
+     "type": "Whitespace"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "[CLS]": {
+         "id": "[CLS]",
+         "ids": [
+           1
+         ],
+         "tokens": [
+           "[CLS]"
+         ]
+       },
+       "[SEP]": {
+         "id": "[SEP]",
+         "ids": [
+           2
+         ],
+         "tokens": [
+           "[SEP]"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "[UNK]": 0,
+       "[CLS]": 1,
+       "[SEP]": 2,
+       "[PAD]": 3,
+       "[MASK]": 4,
+       ".": 5,
+       "nothing": 6,
+       "fio2": 7,
+       "ph": 8,
+       "paco2": 9,
+       "pao2": 10,
+       "hct": 11,
+       "k": 12,
+       "creatinine": 13,
+       "platelets": 14,
+       "bun": 15,
+       "mg": 16,
+       "hco3": 17,
+       "na": 18,
+       "glucose": 19,
+       "wbc": 20,
+       "lactate": 21,
+       "sao2": 22,
+       "ast": 23,
+       "bilirubin": 24,
+       "alt": 25
+     },
+     "unk_token": "[UNK]"
+   }
+ }
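
tokenizer.json defines a word-level tokenizer over 26 clinical lab-test names (fio2, paco2, creatinine, ...), with NFD/lowercase/accent-strip normalization, whitespace pre-tokenization, and the standard BERT [CLS] ... [SEP] template. A sketch of building an equivalent tokenizer from scratch with the tokenizers library — the committed file was likely generated by similar code, but that script is not part of this commit:

```python
from tokenizers import Tokenizer, normalizers, pre_tokenizers, processors
from tokenizers.models import WordLevel

# Vocabulary copied from the "model.vocab" section above.
vocab = {
    "[UNK]": 0, "[CLS]": 1, "[SEP]": 2, "[PAD]": 3, "[MASK]": 4,
    ".": 5, "nothing": 6, "fio2": 7, "ph": 8, "paco2": 9, "pao2": 10,
    "hct": 11, "k": 12, "creatinine": 13, "platelets": 14, "bun": 15,
    "mg": 16, "hco3": 17, "na": 18, "glucose": 19, "wbc": 20,
    "lactate": 21, "sao2": 22, "ast": 23, "bilirubin": 24, "alt": 25,
}

tokenizer = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
tokenizer.add_special_tokens(["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
tokenizer.normalizer = normalizers.Sequence(
    [normalizers.NFD(), normalizers.Lowercase(), normalizers.StripAccents()]
)
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
tokenizer.post_processor = processors.TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)

print(tokenizer.encode("ph lactate glucose").ids)  # [1, 8, 21, 19, 2]
```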
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "unk_token": "[UNK]"
+ }
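
The huge model_max_length is transformers' "no limit" sentinel (int(1e30) rendered as a float), so nothing caps sequence length at the tokenizer level even though the model's max_position_embeddings is 512. A sketch of loading tokenizer and model together, assuming the committed files sit in a local directory named "checkpoint" (a placeholder — the Hub repo id is not shown in this diff):

```python
from transformers import BertForMaskedLM, PreTrainedTokenizerFast

# "checkpoint" is a placeholder local path holding the files
# from this commit; the actual Hub repo id is not visible here.
tokenizer = PreTrainedTokenizerFast.from_pretrained("checkpoint")
model = BertForMaskedLM.from_pretrained("checkpoint")

enc = tokenizer("na k glucose", return_tensors="pt")
print(enc["input_ids"])  # tensor([[ 1, 18, 12, 19,  2]])
```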
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ae3a5909391b40ca695aeda0ed68ebd68ac17a2c0a091be4d2cd64d68ffea70
+ size 3643
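
training_args.bin is the pickled TrainingArguments object the Trainer saves next to each checkpoint. A sketch for inspecting it — on recent PyTorch versions, weights_only=False is required because this is an arbitrary pickled object rather than a tensor file:

```python
import torch

# Unpickle the saved TrainingArguments (transformers must be installed,
# since unpickling re-imports the class).
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```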