AI-Ahmed committed
Commit 89ecc9b
1 Parent(s): 95a6832

Upload with huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. README.md +29 -3
  2. best_ckpt.pth +3 -0
  3. checkpoint-1421/added_tokens.json +3 -0
  4. checkpoint-1421/config.json +35 -0
  5. checkpoint-1421/optimizer.pt +3 -0
  6. checkpoint-1421/pytorch_model.bin +3 -0
  7. checkpoint-1421/rng_state.pth +3 -0
  8. checkpoint-1421/scaler.pt +3 -0
  9. checkpoint-1421/scheduler.pt +3 -0
  10. checkpoint-1421/special_tokens_map.json +9 -0
  11. checkpoint-1421/spm.model +3 -0
  12. checkpoint-1421/tokenizer.json +0 -0
  13. checkpoint-1421/tokenizer_config.json +16 -0
  14. checkpoint-1421/trainer_state.json +56 -0
  15. checkpoint-1421/training_args.bin +3 -0
  16. checkpoint-2842/added_tokens.json +3 -0
  17. checkpoint-2842/config.json +35 -0
  18. checkpoint-2842/optimizer.pt +3 -0
  19. checkpoint-2842/pytorch_model.bin +3 -0
  20. checkpoint-2842/rng_state.pth +3 -0
  21. checkpoint-2842/scaler.pt +3 -0
  22. checkpoint-2842/scheduler.pt +3 -0
  23. checkpoint-2842/special_tokens_map.json +9 -0
  24. checkpoint-2842/spm.model +3 -0
  25. checkpoint-2842/tokenizer.json +0 -0
  26. checkpoint-2842/tokenizer_config.json +16 -0
  27. checkpoint-2842/trainer_state.json +102 -0
  28. checkpoint-2842/training_args.bin +3 -0
  29. checkpoint-4263/added_tokens.json +3 -0
  30. checkpoint-4263/config.json +35 -0
  31. checkpoint-4263/optimizer.pt +3 -0
  32. checkpoint-4263/pytorch_model.bin +3 -0
  33. checkpoint-4263/rng_state.pth +3 -0
  34. checkpoint-4263/scaler.pt +3 -0
  35. checkpoint-4263/scheduler.pt +3 -0
  36. checkpoint-4263/special_tokens_map.json +9 -0
  37. checkpoint-4263/spm.model +3 -0
  38. checkpoint-4263/tokenizer.json +0 -0
  39. checkpoint-4263/tokenizer_config.json +16 -0
  40. checkpoint-4263/trainer_state.json +148 -0
  41. checkpoint-4263/training_args.bin +3 -0
  42. checkpoint-5684/added_tokens.json +3 -0
  43. checkpoint-5684/config.json +35 -0
  44. checkpoint-5684/optimizer.pt +3 -0
  45. checkpoint-5684/pytorch_model.bin +3 -0
  46. checkpoint-5684/rng_state.pth +3 -0
  47. checkpoint-5684/scaler.pt +3 -0
  48. checkpoint-5684/scheduler.pt +3 -0
  49. checkpoint-5684/special_tokens_map.json +9 -0
  50. checkpoint-5684/spm.model +3 -0
README.md CHANGED
@@ -1,3 +1,29 @@
- ---
- license: mit
- ---
+ A fine-tuned model based on Microsoft's **DeBERTaV3**, fine-tuned on **GLUE QQP** to detect the linguistic similarity between two questions and classify them as duplicates or not.
+
+ ## Model Hyperparameters
+
+ ```python
+ epoch=4
+ per_device_train_batch_size=32
+ per_device_eval_batch_size=16
+ lr=2e-5
+ weight_decay=1e-2
+ gradient_checkpointing=True
+ gradient_accumulation_steps=8
+ ```
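
For context, a minimal sketch of how these hyperparameters would map onto 🤗 Transformers `TrainingArguments`. The `output_dir`, the epoch-based evaluation/save strategies, and the `fp16` flag are assumptions inferred from the checkpoint layout in this commit, not taken from the README:

```python
# Hypothetical reconstruction of the training setup from the listed hyperparameters.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="deberta-v3-base-funetuned-qqa",  # matches the path in trainer_state.json
    num_train_epochs=4,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=16,
    learning_rate=2e-5,
    weight_decay=1e-2,
    gradient_checkpointing=True,
    gradient_accumulation_steps=8,
    evaluation_strategy="epoch",  # assumed: eval entries appear once per epoch in the logs
    save_strategy="epoch",        # assumed: checkpoints land every 1421 steps (one epoch)
    fp16=True,                    # assumed: each checkpoint contains a scaler.pt
)
```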
+ ## Model Performance
+
+ ```json
+ {"Training Loss": 0.132400,
+  "Validation Loss": 0.217410,
+  "Validation Accuracy": 0.917969
+ }
+ ```
+
+ ## Model Dependencies
+
+ ```json
+ {"Main Model": "microsoft/deberta-v3-base",
+  "Dataset": "SetFit/qqp"
+ }
+ ```
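
A minimal usage sketch for the uploaded model. The repository id `AI-Ahmed/deberta-v3-base-funetuned-qqa` is inferred from the committer name and the checkpoint path in trainer_state.json, and it assumes final model weights are present at the repo root, so treat both as assumptions:

```python
# Hypothetical inference sketch; the model id is inferred, not confirmed by this commit.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "AI-Ahmed/deberta-v3-base-funetuned-qqa"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

q1 = "How do I learn Python quickly?"
q2 = "What is the fastest way to learn Python?"
inputs = tokenizer(q1, q2, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits
# QQP convention: label 1 = duplicate, label 0 = not duplicate
print(logits.softmax(dim=-1))
```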
best_ckpt.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3df78bb0e84782f642a9b1bef8de67169c388b3693ee48b1d9220349dfeed756
+ size 737782699
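
Three-line entries like this one are Git LFS pointer files: the binary itself lives in LFS storage, addressed by the SHA-256 `oid`, with `size` in bytes. A hedged sketch of resolving the pointer to the real checkpoint, again assuming the inferred repo id:

```python
# Sketch only: repo id is an assumption, not confirmed by this commit.
import torch
from huggingface_hub import hf_hub_download

ckpt_path = hf_hub_download(
    repo_id="AI-Ahmed/deberta-v3-base-funetuned-qqa",  # assumed repo id
    filename="best_ckpt.pth",
)
state = torch.load(ckpt_path, map_location="cpu")  # the download resolves the LFS pointer
```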
checkpoint-1421/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[MASK]": 128000
+ }
checkpoint-1421/config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "microsoft/deberta-v3-base",
+   "architectures": [
+     "DebertaV2ForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.24.0",
+   "type_vocab_size": 0,
+   "vocab_size": 128100
+ }
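
This config.json is what `AutoConfig`/`AutoModel` read when the checkpoint directory is loaded. A minimal sketch, assuming the checkpoint folder has been downloaded locally:

```python
# Sketch: assumes ./checkpoint-1421 exists locally with config.json and pytorch_model.bin.
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained("checkpoint-1421")
print(config.model_type, config.num_hidden_layers)  # "deberta-v2", 12

model = AutoModelForSequenceClassification.from_pretrained("checkpoint-1421")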
checkpoint-1421/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd1a50d4937b9f21324e5ac8f11fa07c5b240587076d0a49af86568b810846d1
+ size 1475556165
checkpoint-1421/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1287312a4a834efa99b26fe3cb9326cd4719a10068875db89b53dab2f4526f72
+ size 737766955
checkpoint-1421/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64c71667ddea107636a065cbc131d35d75bf32d9f87fa554b4fba9e2d87200d5
+ size 14503
checkpoint-1421/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7afb333ba9bc50bd3301ebb72540795fdde8e265f3b435eef901a1d50a5a8c03
+ size 559
checkpoint-1421/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:552ec4a0a5613f610ace5350aff6aaf73695ecc08b302f92f760425d2a7a335f
+ size 559
checkpoint-1421/special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-1421/spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
+ size 2464616
checkpoint-1421/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1421/tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "name_or_path": "microsoft/deberta-v3-base",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": null,
+   "split_by_punct": false,
+   "tokenizer_class": "DebertaV2Tokenizer",
+   "unk_token": "[UNK]",
+   "vocab_type": "spm"
+ }
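
This tokenizer_config.json points `AutoTokenizer` at `DebertaV2Tokenizer`, which is backed by the SentencePiece `spm.model` shipped alongside it. A minimal sketch, assuming a local checkpoint directory and the `sentencepiece` package installed:

```python
# Sketch: assumes ./checkpoint-1421 contains tokenizer_config.json, spm.model, etc.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-1421")
enc = tokenizer("How do I learn Python?", "What is the best way to learn Python?")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"])[:5])  # starts with [CLS]
```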
checkpoint-1421/trainer_state.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "best_metric": 0.22764553129673004,
+   "best_model_checkpoint": "deberta-v3-base-funetuned-qqa/checkpoint-1421",
+   "epoch": 0.9997361709612171,
+   "global_step": 1421,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.18, "learning_rate": 1.912033779028853e-05, "loss": 0.3671, "step": 250},
+     {"epoch": 0.35, "learning_rate": 1.824067558057706e-05, "loss": 0.2758, "step": 500},
+     {"epoch": 0.53, "learning_rate": 1.736101337086559e-05, "loss": 0.2616, "step": 750},
+     {"epoch": 0.7, "learning_rate": 1.6481351161154117e-05, "loss": 0.2469, "step": 1000},
+     {"epoch": 0.88, "learning_rate": 1.5601688951442647e-05, "loss": 0.235, "step": 1250},
+     {"epoch": 1.0, "eval_accuracy": 0.90576171875, "eval_binary_crossentropy_loss": 9.43606185913086,
+      "eval_loss": 0.22764553129673004, "eval_runtime": 148.7776,
+      "eval_samples_per_second": 271.748, "eval_steps_per_second": 16.985, "step": 1421}
+   ],
+   "max_steps": 5684,
+   "num_train_epochs": 4,
+   "total_flos": 2.393340547233485e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
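
trainer_state.json is plain JSON, so the training curve and best metric can be pulled straight out of `log_history`. A minimal sketch against a locally downloaded checkpoint:

```python
# Sketch: reads the trainer state shown above from a local checkpoint directory.
import json

with open("checkpoint-1421/trainer_state.json") as f:
    state = json.load(f)

train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [e for e in state["log_history"] if "eval_loss" in e]
print("best eval loss:", state["best_metric"])        # 0.2276... at this checkpoint
print("train curve:", train_curve)
print("epoch-1 accuracy:", evals[0]["eval_accuracy"])  # 0.90576171875
```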
checkpoint-1421/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:824f49e09d1e24e24b56eb55553854b0550b51f9807cc4bcc80575e87eb1efe4
+ size 3439
checkpoint-2842/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[MASK]": 128000
+ }
checkpoint-2842/config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "microsoft/deberta-v3-base",
+   "architectures": [
+     "DebertaV2ForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.24.0",
+   "type_vocab_size": 0,
+   "vocab_size": 128100
+ }
checkpoint-2842/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f280be97c0952ef3e97d8901189e366db805a8c61dcc7d4305ef8fdb28f4a0b9
+ size 1475556165
checkpoint-2842/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71a2f3358a96959d1fd5bb62b1bf065d36f1246b511749c6e91ae8bb366462cc
+ size 737766955
checkpoint-2842/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df17ed2df95769819f66e6ed49342c944e9cc81ada6efd65f6937d5b6b0dc237
+ size 14503
checkpoint-2842/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4122359f72635429fa624445f1661c497be1cc014af92fc0d1f749759c6b58b1
+ size 559
checkpoint-2842/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca91d8012253dd5f50cc48273ec54d2f325ad0c0852a076e1324fe9ebd684b3c
+ size 559
checkpoint-2842/special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-2842/spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
+ size 2464616
checkpoint-2842/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2842/tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "name_or_path": "microsoft/deberta-v3-base",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": null,
+   "split_by_punct": false,
+   "tokenizer_class": "DebertaV2Tokenizer",
+   "unk_token": "[UNK]",
+   "vocab_type": "spm"
+ }
checkpoint-2842/trainer_state.json ADDED
@@ -0,0 +1,102 @@
+ {
+   "best_metric": 0.20898549258708954,
+   "best_model_checkpoint": "deberta-v3-base-funetuned-qqa/checkpoint-2842",
+   "epoch": 1.9997361709612171,
+   "global_step": 2842,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.18, "learning_rate": 1.912033779028853e-05, "loss": 0.3671, "step": 250},
+     {"epoch": 0.35, "learning_rate": 1.824067558057706e-05, "loss": 0.2758, "step": 500},
+     {"epoch": 0.53, "learning_rate": 1.736101337086559e-05, "loss": 0.2616, "step": 750},
+     {"epoch": 0.7, "learning_rate": 1.6481351161154117e-05, "loss": 0.2469, "step": 1000},
+     {"epoch": 0.88, "learning_rate": 1.5601688951442647e-05, "loss": 0.235, "step": 1250},
+     {"epoch": 1.0, "eval_accuracy": 0.90576171875, "eval_binary_crossentropy_loss": 9.43606185913086,
+      "eval_loss": 0.22764553129673004, "eval_runtime": 148.7776,
+      "eval_samples_per_second": 271.748, "eval_steps_per_second": 16.985, "step": 1421},
+     {"epoch": 1.06, "learning_rate": 1.4722026741731177e-05, "loss": 0.2228, "step": 1500},
+     {"epoch": 1.23, "learning_rate": 1.3842364532019705e-05, "loss": 0.1956, "step": 1750},
+     {"epoch": 1.41, "learning_rate": 1.2962702322308235e-05, "loss": 0.1994, "step": 2000},
+     {"epoch": 1.58, "learning_rate": 1.2083040112596764e-05, "loss": 0.1957, "step": 2250},
+     {"epoch": 1.76, "learning_rate": 1.1203377902885292e-05, "loss": 0.1893, "step": 2500},
+     {"epoch": 1.94, "learning_rate": 1.0323715693173822e-05, "loss": 0.1908, "step": 2750},
+     {"epoch": 2.0, "eval_accuracy": 0.91357421875, "eval_binary_crossentropy_loss": 8.624783515930176,
+      "eval_loss": 0.20898549258708954, "eval_runtime": 148.8373,
+      "eval_samples_per_second": 271.639, "eval_steps_per_second": 16.978, "step": 2842}
+   ],
+   "max_steps": 5684,
+   "num_train_epochs": 4,
+   "total_flos": 4.78668109446697e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-2842/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:824f49e09d1e24e24b56eb55553854b0550b51f9807cc4bcc80575e87eb1efe4
+ size 3439
checkpoint-4263/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[MASK]": 128000
+ }
checkpoint-4263/config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "microsoft/deberta-v3-base",
+   "architectures": [
+     "DebertaV2ForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.24.0",
+   "type_vocab_size": 0,
+   "vocab_size": 128100
+ }
checkpoint-4263/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6ffd88cf0814c3cff051615a0ca02b7de2d9ecddbbe8426289a4343fd344824
+ size 1475556165
checkpoint-4263/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e2d4995cbb293d758e532f6f64f4f7a8b8d99262f71f11e4423ea2b0b871fce
+ size 737766955
checkpoint-4263/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4ec02662cb6e351dc958e648f9561454b4378240f801e2312867fca9b3e8b6c
+ size 14503
checkpoint-4263/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80e3586cc756d547702508d1cb28650a4f7a15c38a1f8d0f2c449fa8814c3dfb
+ size 559
checkpoint-4263/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed99f924de94e7d256684b4dba0d7474d73ac90b13d10de522a3d324e3ce7e2a
+ size 559
checkpoint-4263/special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-4263/spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
+ size 2464616
checkpoint-4263/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4263/tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "name_or_path": "microsoft/deberta-v3-base",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": null,
+   "split_by_punct": false,
+   "tokenizer_class": "DebertaV2Tokenizer",
+   "unk_token": "[UNK]",
+   "vocab_type": "spm"
+ }
checkpoint-4263/trainer_state.json ADDED
@@ -0,0 +1,148 @@
+ {
+   "best_metric": 0.20898549258708954,
+   "best_model_checkpoint": "deberta-v3-base-funetuned-qqa/checkpoint-2842",
+   "epoch": 2.999736170961217,
+   "global_step": 4263,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.18, "learning_rate": 1.912033779028853e-05, "loss": 0.3671, "step": 250},
+     {"epoch": 0.35, "learning_rate": 1.824067558057706e-05, "loss": 0.2758, "step": 500},
+     {"epoch": 0.53, "learning_rate": 1.736101337086559e-05, "loss": 0.2616, "step": 750},
+     {"epoch": 0.7, "learning_rate": 1.6481351161154117e-05, "loss": 0.2469, "step": 1000},
+     {"epoch": 0.88, "learning_rate": 1.5601688951442647e-05, "loss": 0.235, "step": 1250},
+     {"epoch": 1.0, "eval_accuracy": 0.90576171875, "eval_binary_crossentropy_loss": 9.43606185913086,
+      "eval_loss": 0.22764553129673004, "eval_runtime": 148.7776,
+      "eval_samples_per_second": 271.748, "eval_steps_per_second": 16.985, "step": 1421},
+     {"epoch": 1.06, "learning_rate": 1.4722026741731177e-05, "loss": 0.2228, "step": 1500},
+     {"epoch": 1.23, "learning_rate": 1.3842364532019705e-05, "loss": 0.1956, "step": 1750},
+     {"epoch": 1.41, "learning_rate": 1.2962702322308235e-05, "loss": 0.1994, "step": 2000},
+     {"epoch": 1.58, "learning_rate": 1.2083040112596764e-05, "loss": 0.1957, "step": 2250},
+     {"epoch": 1.76, "learning_rate": 1.1203377902885292e-05, "loss": 0.1893, "step": 2500},
+     {"epoch": 1.94, "learning_rate": 1.0323715693173822e-05, "loss": 0.1908, "step": 2750},
+     {"epoch": 2.0, "eval_accuracy": 0.91357421875, "eval_binary_crossentropy_loss": 8.624783515930176,
+      "eval_loss": 0.20898549258708954, "eval_runtime": 148.8373,
+      "eval_samples_per_second": 271.639, "eval_steps_per_second": 16.978, "step": 2842},
+     {"epoch": 2.11, "learning_rate": 9.44405348346235e-06, "loss": 0.1706, "step": 3000},
+     {"epoch": 2.29, "learning_rate": 8.56439127375088e-06, "loss": 0.1574, "step": 3250},
+     {"epoch": 2.46, "learning_rate": 7.68472906403941e-06, "loss": 0.1573, "step": 3500},
+     {"epoch": 2.64, "learning_rate": 6.808585503166785e-06, "loss": 0.1619, "step": 3750},
+     {"epoch": 2.81, "learning_rate": 5.928923293455313e-06, "loss": 0.1574, "step": 4000},
+     {"epoch": 2.99, "learning_rate": 5.049261083743843e-06, "loss": 0.1575, "step": 4250},
+     {"epoch": 3.0, "eval_accuracy": 0.9169921875, "eval_binary_crossentropy_loss": 8.323027610778809,
+      "eval_loss": 0.20993904769420624, "eval_runtime": 148.6942,
+      "eval_samples_per_second": 271.9, "eval_steps_per_second": 16.995, "step": 4263}
+   ],
+   "max_steps": 5684,
+   "num_train_epochs": 4,
+   "total_flos": 7.180021641700454e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-4263/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:824f49e09d1e24e24b56eb55553854b0550b51f9807cc4bcc80575e87eb1efe4
+ size 3439
checkpoint-5684/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[MASK]": 128000
+ }
checkpoint-5684/config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "microsoft/deberta-v3-base",
+   "architectures": [
+     "DebertaV2ForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.24.0",
+   "type_vocab_size": 0,
+   "vocab_size": 128100
+ }
checkpoint-5684/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89b4701c310864ec95f47dd060ab3172cec793bc50583c324ab20d76876eda8b
+ size 1475556165
checkpoint-5684/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9844060415fe086b8fc9d401b82589fce254389c80d41ab68a74543f5f3cc16
+ size 737766955
checkpoint-5684/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51dd1148f1869e988e323551f902630f4c6058ace117c23d947be1febda42ee8
+ size 14503
checkpoint-5684/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c3bf7d39d50349c13985b5d79e8e43f189781b9a084733bba5f6296b8118011
+ size 559
checkpoint-5684/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d7fa27de976f860090af1dc2e9c3ce9327d48a9d735296824b1ac8c15ae2810
+ size 559
checkpoint-5684/special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-5684/spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
+ size 2464616