Neuroinformatica committed
Commit 3e065b8
1 Parent(s): e6d8284

Upload 10 files

language_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad57bab6dbdfeb1cd3848ad84cc585eab14f2d71a36d69c2226f8a2737380fd3
+ size 439792945
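
The weights file is stored as a Git LFS pointer: the repository itself tracks only the spec version, the SHA-256 of the payload, and its size (about 440 MB). A minimal sketch for verifying a downloaded copy against the pointer's oid; the local path is an assumption:

import hashlib

# Hypothetical local path to the downloaded weights.
WEIGHTS_PATH = "language_model.bin"
# Expected digest, copied from the "oid sha256:" line of the LFS pointer above.
EXPECTED_OID = "ad57bab6dbdfeb1cd3848ad84cc585eab14f2d71a36d69c2226f8a2737380fd3"

digest = hashlib.sha256()
with open(WEIGHTS_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED_OID, "checksum mismatch: incomplete or corrupt download"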
language_model_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "IVN-RIN/bioBIT",
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "language": "english",
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "name": "Bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 31102
+ }
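
The config describes a standard BERT-base encoder (12 layers, 12 attention heads, hidden size 768) with a 31102-entry vocabulary, derived from the IVN-RIN/bioBIT checkpoint named in _name_or_path. A minimal sketch for loading that base checkpoint with the transformers library (the config records transformers_version 4.25.1); note this repository itself stores its weights in the FARM layout shown below, not as a transformers checkpoint:

from transformers import AutoConfig, AutoModelForMaskedLM

# Load the base checkpoint referenced by "_name_or_path".
config = AutoConfig.from_pretrained("IVN-RIN/bioBIT")
model = AutoModelForMaskedLM.from_pretrained("IVN-RIN/bioBIT", config=config)
print(config.num_hidden_layers, config.num_attention_heads, config.hidden_size)  # 12 12 768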
prediction_head_0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17af7496c3dd2eb092914c20d385e65a1da90443e222506d4830664f528c344f
+ size 7587
prediction_head_0_config.json ADDED
@@ -0,0 +1 @@
+ {"training": false, "layer_dims": [768, 2], "num_labels": 2, "ph_output_type": "per_token_squad", "model_type": "span_classification", "task_name": "question_answering", "no_ans_boost": 0.0, "context_window_size": 150, "n_best": 4, "n_best_per_sample": 1, "duplicate_filtering": 0, "use_confidence_scores_for_ranking": true, "use_no_answer_legacy_confidence": false, "label_tensor_name": "question_answering_label_ids", "label_list": ["start_token", "end_token"], "metric": "squad", "name": "QuestionAnsweringHead"}
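
The prediction head is a single linear layer (layer_dims [768, 2]) that maps each token's hidden state to start/end span logits, saved in deepset's FARM format as a QuestionAnsweringHead. The overall layout (language_model.bin, prediction_head_0.bin, processor_config.json) matches what Haystack's FARMReader loads; a minimal sketch, assuming the farm-haystack package (1.x API) and a local clone of this repository:

from haystack.nodes import FARMReader
from haystack.schema import Document

# Hypothetical local path to a clone of this repository.
reader = FARMReader(model_name_or_path="path/to/this-repo", top_k=4)  # top_k mirrors n_best

docs = [Document(content="La metformina è indicata nel diabete di tipo 2.")]
result = reader.predict(query="Quale farmaco è indicato nel diabete?", documents=docs)
print(result["answers"][0].answer)

Since no_ans_boost is 0.0, the head neither favors nor penalizes the "no answer" option relative to text spans.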
processor_config.json ADDED
@@ -0,0 +1 @@
+ {"data_dir": "data", "dev_filename": "dev-v2.0.json", "dev_split": 0, "doc_stride": 128, "max_answers": 6, "max_query_length": 64, "max_seq_len": 256, "multithreading_rust": true, "ph_output_type": "per_token_squad", "proxies": null, "sp_toks_end": 1, "sp_toks_mid": 1, "sp_toks_start": 1, "tasks": {"question_answering": {"label_list": ["start_token", "end_token"], "metric": "squad", "label_tensor_name": "question_answering_label_ids", "label_name": "question_answering_label", "label_column_name": null, "text_column_name": null, "task_type": null}}, "test_filename": null, "train_filename": "train-v2.0.json", "tokenizer": "BertTokenizerFast", "processor": "SquadProcessor"}
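
The processor applies SQuAD v2-style preprocessing: queries are capped at 64 tokens, and question-plus-context sequences longer than max_seq_len (256) are split into overlapping windows controlled by doc_stride (128). The windowing can be sketched with a plain transformers tokenizer, where stride sets the token overlap between consecutive windows (the example strings are made up):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("IVN-RIN/bioBIT")
long_context = " ".join(["parola"] * 600)  # a deliberately over-long passage

encoded = tokenizer(
    "Quale terapia è indicata?",  # question: first segment, never truncated here
    long_context,                 # context: second segment, split into windows
    max_length=256,
    stride=128,
    truncation="only_second",
    return_overflowing_tokens=True,
)
print(len(encoded["input_ids"]))  # number of overlapping windows produced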
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
test_performance.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "EM": 78.12244897959184,
+   "f1": 84.74487389969126,
+   "top_n_accuracy": 97.83673469387755,
+   "top_n": 4,
+   "EM_text_answer": 66.49316851008457,
+   "f1_text_answer": 77.0494086234506,
+   "top_n_accuracy_text_answer": 96.55172413793103,
+   "top_n_EM_text_answer": 71.17761873780091,
+   "top_n_f1_text_answer": 88.40625292515978,
+   "Total_text_answer": 1537,
+   "EM_no_answer": 97.69989047097481,
+   "f1_no_answer": 97.69989047097481,
+   "top_n_accuracy_no_answer": 100.0,
+   "Total_no_answer": 913
+ }
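
The two subsets are internally consistent: the headline EM and f1 are the size-weighted averages over the 1537 answerable and 913 no-answer questions. A quick arithmetic check:

# Size-weighted average of the two subsets reproduces the headline EM:
em = (1537 * 66.49316851008457 + 913 * 97.69989047097481) / (1537 + 913)
print(round(em, 2))  # -> 78.12, matching "EM" above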
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "max_len": 512,
+   "model_max_length": 512,
+   "name_or_path": "IVN-RIN/bioBIT",
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "special_tokens_map_file": null,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "truncation": true,
+   "unk_token": "[UNK]"
+ }
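
The tokenizer is a cased WordPiece BertTokenizer (do_lower_case is false) with a 512-token limit, matching the model's max_position_embeddings. A minimal loading sketch; the example sentence is made up:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("IVN-RIN/bioBIT")
tokens = tokenizer.tokenize("Il paziente presenta febbre e cefalea.")
print(tokens)  # casing preserved, since do_lower_case is false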
vocab.txt ADDED
The diff for this file is too large to render.