pabRomero committed on
Commit 9d93078
1 Parent(s): 300c1b1

Training in progress, epoch 0

config.json CHANGED
@@ -1,12 +1,10 @@
 {
-  "_name_or_path": "allenai/biomed_roberta_base",
+  "_name_or_path": "emilyalsentzer/Bio_ClinicalBERT",
   "architectures": [
-    "RobertaForTokenClassification"
+    "BertForTokenClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
-  "bos_token_id": 0,
   "classifier_dropout": null,
-  "eos_token_id": 2,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
@@ -54,16 +52,16 @@
     "I-Strength": 8,
     "O": 0
   },
-  "layer_norm_eps": 1e-05,
-  "max_position_embeddings": 514,
-  "model_type": "roberta",
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "pad_token_id": 1,
+  "pad_token_id": 0,
   "position_embedding_type": "absolute",
   "torch_dtype": "float32",
   "transformers_version": "4.44.1",
-  "type_vocab_size": 1,
+  "type_vocab_size": 2,
   "use_cache": true,
-  "vocab_size": 50265
+  "vocab_size": 28996
 }
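Note: the new config is what transformers regenerates when the token-classification head is re-initialized from the new base checkpoint. A minimal sketch of that step (the label count of 9 is an assumption inferred from the visible label map, whose ids run from "O": 0 to "I-Strength": 8):

from transformers import AutoConfig, AutoModelForTokenClassification

# Assumption: 9 labels ("O" = 0 ... "I-Strength" = 8); only the tail of the
# label map is visible in this diff.
config = AutoConfig.from_pretrained("emilyalsentzer/Bio_ClinicalBERT", num_labels=9)
model = AutoModelForTokenClassification.from_pretrained(
    "emilyalsentzer/Bio_ClinicalBERT", config=config
)
# The resulting config matches the new values above:
# model_type "bert", vocab_size 28996, max_position_embeddings 512.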
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a696ce6737184cbc65a9bfeec25f2e0cf52196fb18b56f571aa6d3d8735779dc
-size 496302532
+oid sha256:a66127805cfc2dc84edbf7dd44a64ba5b58cd5b6953b7a8a8754e6338c4ca1ed
+size 430960500
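Note: the ~65 MB drop in model.safetensors is almost entirely the smaller embedding matrix that comes with the 28,996-token BERT vocabulary. A rough sanity check, assuming fp32 weights (4 bytes per parameter) and hidden_size 768:

old_size, new_size = 496_302_532, 430_960_500  # byte sizes from the diff above
vocab_delta = 50_265 - 28_996                  # RoBERTa vs. BERT vocabulary rows
embedding_delta = vocab_delta * 768 * 4        # dropped embedding rows, fp32
print(old_size - new_size)  # 65_342_032
print(embedding_delta)      # 65_338_368 -- accounts for nearly all of it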
runs/Aug22_11-42-16_a2d1771db0e9/events.out.tfevents.1724326938.a2d1771db0e9.2970.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:632fa37f6432f24dd1072cba96b694e9c9f7f55ca0a12fa84e4e2699c47bd08d
+size 5759
runs/Aug22_11-43-12_a2d1771db0e9/events.out.tfevents.1724326993.a2d1771db0e9.2970.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8aabd9b82e04f7b4398fe32caa6349a4e0be6ac0da87a872ac7e260878cdfce6
+size 6222
runs/Aug22_11-44-44_a2d1771db0e9/events.out.tfevents.1724327085.a2d1771db0e9.2970.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:065126abe15e23579e63536eedab9d883a30488410f88b5cb81bfaf0371f278d
+size 6222
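Note: the three runs/ files are TensorBoard event logs written during training. They can be inspected offline with the tensorboard package; a sketch (the scalar tag names are assumptions -- the Trainer typically logs under train/*, but the exact tags depend on the run):

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator(
    "runs/Aug22_11-44-44_a2d1771db0e9/events.out.tfevents.1724327085.a2d1771db0e9.2970.2"
)
acc.Reload()
print(acc.Tags()["scalars"])             # list the scalar tags actually present
for event in acc.Scalars("train/loss"):  # assumed tag; pick one from the list
    print(event.step, event.value)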
special_tokens_map.json CHANGED
@@ -1,51 +1,7 @@
 {
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "cls_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "<mask>",
-    "lstrip": true,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
 }
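Note: the new special tokens map uses the plain-string form, which transformers accepts interchangeably with the per-token dicts the old RoBERTa tokenizer used. The tokens resolve to the WordPiece ids listed in tokenizer_config.json below; a quick check:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("emilyalsentzer/Bio_ClinicalBERT")
print(tok.convert_tokens_to_ids(["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]))
# [0, 100, 101, 102, 103]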
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -2,57 +2,57 @@
   "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
-      "content": "<s>",
+      "content": "[PAD]",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "1": {
-      "content": "<pad>",
+    "100": {
+      "content": "[UNK]",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "2": {
-      "content": "</s>",
+    "101": {
+      "content": "[CLS]",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "3": {
-      "content": "<unk>",
+    "102": {
+      "content": "[SEP]",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "50264": {
-      "content": "<mask>",
-      "lstrip": true,
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
-  "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
-  "cls_token": "<s>",
-  "eos_token": "</s>",
-  "errors": "replace",
-  "mask_token": "<mask>",
-  "max_len": 512,
-  "model_max_length": 512,
-  "pad_token": "<pad>",
-  "sep_token": "</s>",
-  "tokenizer_class": "RobertaTokenizer",
-  "trim_offsets": true,
-  "unk_token": "<unk>"
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
 }
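Note: one practical consequence of the new tokenizer_config: model_max_length is now the "no limit recorded" sentinel (1000000000000000019884624838656) instead of 512, while the model itself has only 512 position embeddings. Truncation should therefore be requested explicitly; a sketch (the example sentence is illustrative):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("emilyalsentzer/Bio_ClinicalBERT")
# model_max_length is unset, so cap the sequence length by hand to stay
# within the 512 position embeddings configured above:
enc = tok("ibuprofen 200 mg twice daily", truncation=True, max_length=512)
print(enc.input_ids[:3])  # starts with [CLS] = 101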
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:96273802c8cc1223e877d8bba44c74208f2e9431aec018e105f2271f06f8e427
-size 5240
+oid sha256:016101e4371611dd545278710c56d9f256ca04c2c90c569c9343e3604c5f3e46
+size 5304
vocab.txt ADDED
The diff for this file is too large to render. See raw diff
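Note: taken together, these are the files that transformers' Trainer uploads when pushing checkpoints to the Hub, which fits the "Training in progress, epoch 0" commit message. A hypothetical reconstruction of the setup behind this commit (output_dir and the training data are not recoverable from the diff):

from transformers import (
    AutoModelForTokenClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

model_name = "emilyalsentzer/Bio_ClinicalBERT"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(model_name, num_labels=9)

args = TrainingArguments(
    output_dir="ner-checkpoints",  # hypothetical; the real directory is unknown
    push_to_hub=True,              # uploads config.json, model.safetensors,
    report_to="tensorboard",       # tokenizer files, runs/, training_args.bin
)
trainer = Trainer(model=model, args=args, tokenizer=tokenizer)
# trainer.train() would then produce checkpoint commits like this one.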