Ubuntu committed
Commit 641c279
1 Parent(s): 9300674
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[MASK]": 128000
+ }
config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "_name_or_path": "microsoft/deberta-v3-large",
+   "architectures": [
+     "DebertaV2ForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 1024,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.1",
+   "type_vocab_size": 0,
+   "vocab_size": 128100
+ }
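The configuration above is a stock `microsoft/deberta-v3-large` backbone (24 layers, hidden size 1024) with a `DebertaV2ForSequenceClassification` head and a single label, i.e. one regression-style output per input pair. A minimal loading sketch, assuming the commit's files are available in a local directory (the path below is a placeholder, not part of this commit):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Placeholder: point this at a local clone of the repository this commit belongs to.
checkpoint = "./deberta-v3-large-checkpoint"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
model.eval()

# id2label has a single entry, so the head emits one logit per example; the metrics
# in eval.jsonl (RMSE, Spearman, Pearson) suggest it is read as a continuous score.
inputs = tokenizer("claim text", "evidence text", return_tensors="pt", truncation=True)
with torch.no_grad():
    score = model(**inputs).logits.squeeze(-1)
print(score.item())
```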
eval.jsonl ADDED
@@ -0,0 +1,6 @@
+ {"eval_loss": 0.6821334362030029, "eval_rmse": 0.8259137272834778, "eval_spearman": 0.6366639193422842, "eval_pearson": 0.6731962999298167, "eval_runtime": 2.0034, "eval_samples_per_second": 120.795, "eval_steps_per_second": 15.474, "epoch": 1.0, "eval_set": "osu"}
+ {"eval_loss": 1.3671470880508423, "eval_rmse": 1.1692506074905396, "eval_spearman": 0.46622255846621397, "eval_pearson": 0.4136858459894459, "eval_runtime": 13.114, "eval_samples_per_second": 139.012, "eval_steps_per_second": 17.386, "epoch": 1.0, "eval_set": "healthver"}
+ {"eval_loss": 0.7539231181144714, "eval_rmse": 0.8682874441146851, "eval_spearman": 0.5488431129607819, "eval_pearson": 0.538707782127853, "eval_runtime": 0.8721, "eval_samples_per_second": 136.452, "eval_steps_per_second": 17.2, "epoch": 1.0, "eval_set": "deepset_1"}
+ {"eval_loss": 0.49185943603515625, "eval_rmse": 0.7013269662857056, "eval_spearman": 0.6717814226965486, "eval_pearson": 0.691180339379398, "eval_runtime": 1.1756, "eval_samples_per_second": 132.7, "eval_steps_per_second": 17.013, "epoch": 1.0, "eval_set": "deepset_2"}
+ {"eval_loss": 0.9618063569068909, "eval_rmse": 0.9807172417640686, "eval_spearman": 0.3984599642180852, "eval_pearson": 0.41185545800624546, "eval_runtime": 2.5757, "eval_samples_per_second": 134.33, "eval_steps_per_second": 17.082, "epoch": 1.0, "eval_set": "deepset_3"}
+ {"eval_loss": 0.7669643759727478, "eval_rmse": 0.8757649064064026, "eval_spearman": 0.4542610937998787, "eval_pearson": 0.4986133277496825, "eval_runtime": 1.5198, "eval_samples_per_second": 135.546, "eval_steps_per_second": 17.108, "epoch": 1.0, "eval_set": "deepset_4"}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2085c6d3c342fc79ad74d556a5a86c45a92a27f914fe8e768c51d211c4ee92a7
+ size 1740389291
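pytorch_model.bin is tracked with Git LFS, so only the pointer above lives in the repository; the ~1.74 GB weight file is fetched by `git lfs pull` or the Hub download tooling. A sketch for checking a downloaded copy against the pointer's size and SHA-256:

```python
import hashlib
import os

path = "pytorch_model.bin"  # the resolved weight file, not the LFS pointer text
expected_sha256 = "2085c6d3c342fc79ad74d556a5a86c45a92a27f914fe8e768c51d211c4ee92a7"
expected_size = 1740389291

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch: is this still the pointer file?"
assert digest.hexdigest() == expected_sha256, "checksum mismatch"
print("pytorch_model.bin matches the LFS pointer")
```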
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
+ size 2464616
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "name_or_path": "microsoft/deberta-v3-large",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": null,
+   "split_by_punct": false,
+   "tokenizer_class": "DebertaV2Tokenizer",
+   "unk_token": "[UNK]",
+   "vocab_type": "spm"
+ }
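tokenizer_config.json selects `DebertaV2Tokenizer` over the SentencePiece model in spm.model, with lowercasing disabled; added_tokens.json pins `[MASK]` to id 128000 within the 128100-entry vocabulary declared in config.json. A small sanity-check sketch (the local path is a placeholder):

```python
from transformers import AutoTokenizer

# Placeholder: a local clone containing spm.model, tokenizer_config.json, added_tokens.json, etc.
tok = AutoTokenizer.from_pretrained("./deberta-v3-large-checkpoint")

print(tok.convert_tokens_to_ids("[MASK]"))          # 128000, per added_tokens.json
print(tok.cls_token, tok.sep_token, tok.pad_token)  # [CLS] [SEP] [PAD]

# Sentence pairs are encoded as [CLS] A [SEP] B [SEP], matching special_tokens_map.json.
enc = tok("first segment", "second segment")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
```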
training_cfg.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "model_name": "microsoft/deberta-v3-large",
+   "lr": 6e-06,
+   "epochs": 1,
+   "weight_decay": 0.01,
+   "train_bs": 3,
+   "eval_bs": 8,
+   "gradient_accumulation_steps": 8,
+   "gradient_checkpointing": false,
+   "eval_datasets": [
+     {"path": "./data/all_eval/osu_eval.csv", "name": "osu"},
+     {"path": "./data/all_eval/healthver_eval.csv", "name": "healthver"},
+     {"path": "./data/all_eval/deepset_1_eval.csv", "name": "deepset_1"},
+     {"path": "./data/all_eval/deepset_2_eval.csv", "name": "deepset_2"},
+     {"path": "./data/all_eval/deepset_3_eval.csv", "name": "deepset_3"},
+     {"path": "./data/all_eval/deepset_4_eval.csv", "name": "deepset_4"}
+   ],
+   "train_dataset_path": "./data/all_training/all_train_v2_pseudo.csv",
+   "ensemble_model_predictions": [
+     "mathislucka/deberta-large-hallucination-eval-v2",
+     "mathislucka/deberta-base-hallucination-eval-v2",
+     "models/albert-xxlarge-v2-optim-data-v1",
+     "models/deberta-base-v3-no-atomic-wfc-nq",
+     "models/deberta-v3-large-data-optim-v2"
+   ],
+   "half_precision": true
+ }
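training_cfg.json is the project's own run configuration rather than a Hugging Face `TrainingArguments` dump; with `train_bs` of 3 and `gradient_accumulation_steps` of 8, the effective batch size is 24 per device. A hedged sketch of how these fields could be mapped onto standard `TrainingArguments` parameters (the mapping is an assumption, not taken from the training code in this repository):

```python
import json
from transformers import TrainingArguments

with open("training_cfg.json") as f:
    cfg = json.load(f)

# Effective batch size per device: 3 * 8 = 24.
effective_bs = cfg["train_bs"] * cfg["gradient_accumulation_steps"]

args = TrainingArguments(
    output_dir="./out",  # placeholder, not part of the commit
    learning_rate=cfg["lr"],
    num_train_epochs=cfg["epochs"],
    weight_decay=cfg["weight_decay"],
    per_device_train_batch_size=cfg["train_bs"],
    per_device_eval_batch_size=cfg["eval_bs"],
    gradient_accumulation_steps=cfg["gradient_accumulation_steps"],
    gradient_checkpointing=cfg["gradient_checkpointing"],
    fp16=cfg["half_precision"],
)
print(f"effective train batch size: {effective_bs}")
```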