pruas committed
Commit eaf6c70
1 Parent(s): b79b934

Upload 11 files

config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "_name_or_path": "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract",
+  "architectures": [
+    "BertForTokenClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "O",
+    "1": "B",
+    "2": "I"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "B": "1",
+    "I": "2",
+    "O": "0"
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.9.0",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
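config.json describes a BERT-base encoder (12 layers, 768 hidden units, 12 heads) with a 3-way token-classification head over a BIO label scheme. A minimal sketch of loading the checkpoint with transformers, assuming the uploaded files have been downloaded into a local directory; the directory name checkpoint-1955 below is hypothetical, echoing the checkpoint path in trainer_state.json:

```python
# Minimal sketch: load the uploaded token-classification checkpoint.
# "checkpoint-1955" is an assumed local directory holding these files.
from transformers import AutoModelForTokenClassification

model = AutoModelForTokenClassification.from_pretrained("checkpoint-1955")

# id2label in config.json maps the three output classes to BIO tags:
# 0 -> "O" (outside), 1 -> "B" (entity begins), 2 -> "I" (entity continues).
print(model.config.id2label)  # {0: 'O', 1: 'B', 2: 'I'}
```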
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bc663875955232200d042e41de8332f83bc1f386b7cd955831b8ca84deee379
+size 871267045
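The large binaries in this commit are stored through Git LFS, so the diff records only a three-line pointer: the spec version, a sha256 object id, and the payload size in bytes (here ~871 MB of optimizer state). A minimal sketch of reading such a pointer, assuming the local file is still the unresolved pointer text rather than the fetched binary:

```python
# Minimal sketch: parse a Git LFS pointer file into its key/value fields.
# Works on the three-line pointer shown above; assumes the checkout has
# not yet resolved the pointer into the real binary.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = parse_lfs_pointer("optimizer.pt")
print(pointer["oid"])   # "sha256:8bc66387..."
print(pointer["size"])  # "871267045" (bytes)
```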
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1312af2938c5236bf5a6f3dfb8c2da6f0d1274db1ada6ac25f4436f954c37046
+size 435659249
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2973d175053ff67292cad22751e42b13cc49bc3362ac2157656c7390f391db04
+size 17563
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e97f32ac3d87678cffbf4ed0b32281e1d571125c0e80733565722b883fe54427
+size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "name_or_path": "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
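tokenizer_config.json pins the tokenizer class (BertTokenizer) and lowercasing behavior for the PubMedBERT vocabulary. A short sketch of loading it and splitting a sentence into WordPiece tokens, again using the hypothetical local directory name from above:

```python
# Minimal sketch: load the uncased WordPiece tokenizer defined by
# tokenizer_config.json; "checkpoint-1955" is the assumed local directory.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-1955")

# do_lower_case=true: input is lowercased before WordPiece segmentation.
print(tokenizer.tokenize("Fed-batch fermentation of E. coli"))
```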
trainer_state.json ADDED
@@ -0,0 +1,46 @@
+{
+  "best_metric": 0.00010504200326977298,
+  "best_model_checkpoint": "data/models/bioprocess/final/checkpoint-1955",
+  "epoch": 1.0,
+  "global_step": 1955,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.26,
+      "learning_rate": 7.501994884910486e-05,
+      "loss": 0.0572,
+      "step": 500
+    },
+    {
+      "epoch": 0.51,
+      "learning_rate": 4.923989769820972e-05,
+      "loss": 0.0285,
+      "step": 1000
+    },
+    {
+      "epoch": 0.77,
+      "learning_rate": 2.345984654731458e-05,
+      "loss": 0.0201,
+      "step": 1500
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 1.0,
+      "eval_f1": 0.0,
+      "eval_loss": 0.00010504200326977298,
+      "eval_precision": 0.0,
+      "eval_recall": 0.0,
+      "eval_runtime": 1.4118,
+      "eval_samples_per_second": 0.708,
+      "eval_steps_per_second": 0.708,
+      "step": 1955
+    }
+  ],
+  "max_steps": 1955,
+  "num_train_epochs": 1,
+  "total_flos": 8170823454382080.0,
+  "trial_name": null,
+  "trial_params": null
+}
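trainer_state.json captures the Trainer's log history for the single training epoch: three training-loss snapshots and one final evaluation entry. A minimal stdlib sketch of pulling those numbers back out of the file as uploaded:

```python
# Minimal sketch: read back the Trainer log history uploaded above,
# separating training-loss snapshots from the final evaluation entry.
import json

with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

losses = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [e for e in state["log_history"] if "eval_loss" in e]

print(losses)                # [(500, 0.0572), (1000, 0.0285), (1500, 0.0201)]
print(evals[-1]["eval_f1"])  # 0.0
```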
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac089e01504bfbebf1c59762286d5a330d684d6146e1ea502eabfba531a6ab3b
+size 2671
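Unlike the other .bin/.pt files, training_args.bin is not a tensor checkpoint but a pickled TrainingArguments object saved by the Trainer. A sketch of inspecting it, assuming a transformers install compatible with the 4.9.0 version that wrote it:

```python
# Minimal sketch: unpickle the TrainingArguments saved by the Trainer.
# weights_only=False is required on recent PyTorch (>=2.6 defaults to True)
# because this file holds a pickled Python object, not tensors.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate)
print(args.per_device_train_batch_size)
```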
vocab.txt ADDED
The diff for this file is too large to render.