tarasophia committed
Commit ef82cb4 · 1 Parent(s): 80bab99

Upload 7 files

Files changed (4)
  1. config.json +39 -3
  2. pytorch_model.bin +2 -2
  3. tokenizer.json +16 -2
  4. training_args.bin +1 -1
config.json CHANGED
@@ -1,16 +1,51 @@
 {
-  "_name_or_path": "bert-base-cased",
+  "_name_or_path": "emilyalsentzer/Bio_ClinicalBERT",
   "architectures": [
-    "BertForMaskedLM"
+    "BertForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "classifier_dropout": null,
-  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2",
+    "3": "LABEL_3",
+    "4": "LABEL_4",
+    "5": "LABEL_5",
+    "6": "LABEL_6",
+    "7": "LABEL_7",
+    "8": "LABEL_8",
+    "9": "LABEL_9",
+    "10": "LABEL_10",
+    "11": "LABEL_11",
+    "12": "LABEL_12",
+    "13": "LABEL_13",
+    "14": "LABEL_14",
+    "15": "LABEL_15"
+  },
   "initializer_range": 0.02,
   "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_10": 10,
+    "LABEL_11": 11,
+    "LABEL_12": 12,
+    "LABEL_13": 13,
+    "LABEL_14": 14,
+    "LABEL_15": 15,
+    "LABEL_2": 2,
+    "LABEL_3": 3,
+    "LABEL_4": 4,
+    "LABEL_5": 5,
+    "LABEL_6": 6,
+    "LABEL_7": 7,
+    "LABEL_8": 8,
+    "LABEL_9": 9
+  },
   "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
   "model_type": "bert",
@@ -18,6 +53,7 @@
   "num_hidden_layers": 12,
   "pad_token_id": 0,
   "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
   "torch_dtype": "float32",
   "transformers_version": "4.23.1",
   "type_vocab_size": 2,
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dd4b72a69fbbfced8adabf23a5ff1c9b7e25acbbcc0af1f5a2acc72331df7f70
-size 433431979
+oid sha256:ecd7a3d6df4172a7a9a60700cda2572d322eba6ee7a5adf76ead65620e95d291
+size 433361325
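
Taken together, the config.json change above and the re-uploaded pytorch_model.bin correspond to re-initializing emilyalsentzer/Bio_ClinicalBERT with a 16-way single-label classification head in place of the original masked-LM head. Below is a minimal sketch of how such a checkpoint is typically produced with transformers 4.23.x; the fine-tuning code itself is not part of this commit, so everything beyond the base checkpoint, the label count, and the problem type is an assumption.

```python
# Hedged sketch, not part of this commit: only the checkpoint name, num_labels=16,
# and problem_type come from the diff; the Auto* classes and the save path are assumed.
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained(
    "emilyalsentzer/Bio_ClinicalBERT",
    num_labels=16,  # yields the generic LABEL_0 ... LABEL_15 id2label/label2id maps
    problem_type="single_label_classification",
)

# The classification head is newly initialized here and would then be fine-tuned
# (the training loop is not included in this commit).
model = AutoModelForSequenceClassification.from_pretrained(
    "emilyalsentzer/Bio_ClinicalBERT",
    config=config,
)

# save_pretrained() writes config.json and pytorch_model.bin in the format
# uploaded here (the weights file is tracked via Git LFS pointers, as above).
model.save_pretrained("finetuned-model")
```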
tokenizer.json CHANGED
@@ -1,7 +1,21 @@
 {
   "version": "1.0",
-  "truncation": null,
-  "padding": null,
+  "truncation": {
+    "direction": "Right",
+    "max_length": 512,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
+  "padding": {
+    "strategy": {
+      "Fixed": 512
+    },
+    "direction": "Right",
+    "pad_to_multiple_of": null,
+    "pad_id": 0,
+    "pad_type_id": 0,
+    "pad_token": "[PAD]"
+  },
   "added_tokens": [
     {
       "id": 0,
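
The updated tokenizer.json bakes right-side truncation at 512 tokens and fixed-length padding to 512 with [PAD] (id 0) into the serialized tokenizer. Here is a minimal sketch, assuming the backend tokenizers library is driven directly, of the two calls that produce exactly these "truncation" and "padding" blocks; the actual preprocessing code is not part of this commit.

```python
# Hedged sketch, not part of this commit: enabling the truncation/padding settings
# that appear in the new tokenizer.json. File paths are illustrative.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# Matches the "+" side of the diff: truncate on the right to 512 tokens,
# and pad every sequence to a fixed length of 512 with [PAD] (id 0).
tok.enable_truncation(max_length=512)   # direction=Right, strategy=LongestFirst, stride=0 are defaults
tok.enable_padding(length=512, pad_id=0, pad_token="[PAD]")  # Fixed(512); pad_type_id defaults to 0

tok.save("tokenizer.json")
```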
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:101bf7a9516cf21cad87ec7c531ad1aa093a90192a7ae509f2395d5d5d3e4d44
+oid sha256:7ac280045750fa6092ae17946838443f6e70dbce22ca1a48175def4d1c4312b3
 size 3439
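
training_args.bin keeps its 3439-byte size; only the LFS object hash changes, consistent with a fresh training run. The file is the TrainingArguments object that the transformers Trainer serializes with torch.save, so it can be inspected locally; a minimal sketch, assuming a downloaded copy of the file:

```python
# Hedged sketch, not part of this commit: inspecting the serialized TrainingArguments.
# transformers must be importable so the object can be unpickled.
import torch

training_args = torch.load("training_args.bin")
print(training_args)  # shows the hyperparameters used for this run
```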