lanzv committed
Commit 1d4a5ae
1 Parent(s): 303c022

Training in progress, step 500

config.json CHANGED
@@ -1,10 +1,11 @@
 {
-  "_name_or_path": "emilyalsentzer/Bio_ClinicalBERT",
+  "_name_or_path": "../models/bert-base-multilingual-cased",
   "architectures": [
     "BertForQuestionAnswering"
   ],
   "attention_probs_dropout_prob": 0.1,
   "classifier_dropout": null,
+  "directionality": "bidi",
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
@@ -16,10 +17,15 @@
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
   "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
   "position_embedding_type": "absolute",
   "torch_dtype": "float32",
   "transformers_version": "4.38.2",
   "type_vocab_size": 2,
   "use_cache": true,
-  "vocab_size": 28996
+  "vocab_size": 119547
 }
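This config change swaps the question-answering base model from Bio_ClinicalBERT to multilingual BERT. A minimal sketch of how the updated config would be consumed with the transformers library follows; it assumes the Hub checkpoint "bert-base-multilingual-cased" matches the local ../models/bert-base-multilingual-cased path referenced in _name_or_path, which is not stated in the commit itself.

# Sketch: load the base model described by the new config and check the key fields.
# Assumption: the local ../models/bert-base-multilingual-cased copy corresponds to the
# public "bert-base-multilingual-cased" checkpoint.
from transformers import AutoConfig, AutoModelForQuestionAnswering

config = AutoConfig.from_pretrained("bert-base-multilingual-cased")
print(config.vocab_size)       # 119547, up from Bio_ClinicalBERT's 28996
print(config.type_vocab_size)  # 2

# Instantiates BertForQuestionAnswering: the pretrained multilingual encoder plus a
# freshly initialized span-prediction (QA) head to be fine-tuned.
model = AutoModelForQuestionAnswering.from_pretrained("bert-base-multilingual-cased")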
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e56cf56ef2a752bc5165123ac9e04e1ccb834b618cad11a83e7ff10004c11c7f
-size 430908208
+oid sha256:73a7e84ae34755d8569f2747815bd862298dad83e48c9218c9e1ae9dd345de8b
+size 709080904
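The checkpoint grows by roughly 278 MB, which is consistent with the larger vocabulary: the extra token-embedding rows alone account for essentially the whole difference. A quick back-of-the-envelope check (float32 weights; the few remaining bytes are presumably safetensors header overhead):

# Rough check that the size jump is explained by the bigger token-embedding matrix.
hidden_size = 768
added_vocab_rows = 119547 - 28996                   # new vs. old vocab_size
added_bytes = added_vocab_rows * hidden_size * 4    # float32 = 4 bytes per weight
print(added_bytes)                                  # 278172672
print(709080904 - 430908208)                        # 278172696, actual file-size difference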
runs/Apr27_03-04-03_tdll-3gpu2/events.out.tfevents.1714179844.tdll-3gpu2.496520.11 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6bd886b5c89e58b730e49b3a005eb56204ac27978a01d9905ee50a0ff7fc573
+size 5025
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -43,11 +43,9 @@
   },
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
-  "do_basic_tokenize": true,
-  "do_lower_case": true,
+  "do_lower_case": false,
   "mask_token": "[MASK]",
-  "model_max_length": 1000000000000000019884624838656,
-  "never_split": null,
+  "model_max_length": 512,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
   "strip_accents": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ae496685213172bd373a6445306bf26bfebf3741f12ab19b473645cb0a890c77
+oid sha256:c4d6e90486ebfae0c4b735002e4744ba790ff44cd5443c2a817f915b3084b48f
 size 4920
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff