haryoaw committed
Commit
3b25074
1 Parent(s): e00e757

Initial Commit

Files changed (4)
  1. README.md +109 -0
  2. config.json +39 -0
  3. pytorch_model.bin +3 -0
  4. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,109 @@
+ ---
+ license: mit
+ base_model: haryoaw/scenario-TCR_data-cardiffnlp_tweet_sentiment_multilingual_all_a
+ tags:
+ - generated_from_trainer
+ datasets:
+ - tweet_sentiment_multilingual
+ metrics:
+ - accuracy
+ - f1
+ model-index:
+ - name: scenario-KD-PO-CDF-ALL-D2_data-cardiffnlp_tweet_sentiment_multilingual_all_delta
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # scenario-KD-PO-CDF-ALL-D2_data-cardiffnlp_tweet_sentiment_multilingual_all_delta
+
+ This model is a fine-tuned version of [haryoaw/scenario-TCR_data-cardiffnlp_tweet_sentiment_multilingual_all_a](https://huggingface.co/haryoaw/scenario-TCR_data-cardiffnlp_tweet_sentiment_multilingual_all_a) on the tweet_sentiment_multilingual dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 3.3760
+ - Accuracy: 0.5586
+ - F1: 0.5590
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 7777
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 50
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
+ |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|
+ | 4.88 | 1.09 | 500 | 4.1661 | 0.4518 | 0.4462 |
+ | 3.9513 | 2.17 | 1000 | 3.7172 | 0.5154 | 0.5102 |
+ | 3.4556 | 3.26 | 1500 | 3.5612 | 0.5293 | 0.5221 |
+ | 3.0959 | 4.35 | 2000 | 3.5271 | 0.5440 | 0.5349 |
+ | 2.7593 | 5.43 | 2500 | 3.3105 | 0.5602 | 0.5595 |
+ | 2.548 | 6.52 | 3000 | 3.4842 | 0.5355 | 0.5360 |
+ | 2.3323 | 7.61 | 3500 | 3.2883 | 0.5575 | 0.5444 |
+ | 2.1215 | 8.7 | 4000 | 3.2282 | 0.5583 | 0.5607 |
+ | 1.9719 | 9.78 | 4500 | 3.5734 | 0.5594 | 0.5534 |
+ | 1.8238 | 10.87 | 5000 | 3.2866 | 0.5552 | 0.5538 |
+ | 1.703 | 11.96 | 5500 | 3.2345 | 0.5556 | 0.5557 |
+ | 1.5931 | 13.04 | 6000 | 3.2219 | 0.5583 | 0.5539 |
+ | 1.4959 | 14.13 | 6500 | 3.3073 | 0.5637 | 0.5644 |
+ | 1.4198 | 15.22 | 7000 | 3.5221 | 0.5579 | 0.5541 |
+ | 1.35 | 16.3 | 7500 | 3.5125 | 0.5660 | 0.5643 |
+ | 1.2937 | 17.39 | 8000 | 3.5089 | 0.5640 | 0.5637 |
+ | 1.2282 | 18.48 | 8500 | 3.4262 | 0.5664 | 0.5658 |
+ | 1.1698 | 19.57 | 9000 | 3.3739 | 0.5598 | 0.5593 |
+ | 1.1402 | 20.65 | 9500 | 3.4930 | 0.5521 | 0.5541 |
+ | 1.0874 | 21.74 | 10000 | 3.4935 | 0.5625 | 0.5602 |
+ | 1.0652 | 22.83 | 10500 | 3.3963 | 0.5482 | 0.5478 |
+ | 1.0191 | 23.91 | 11000 | 3.4823 | 0.5571 | 0.5583 |
+ | 0.9868 | 25.0 | 11500 | 3.6035 | 0.5579 | 0.5586 |
+ | 0.9487 | 26.09 | 12000 | 3.6034 | 0.5525 | 0.5488 |
+ | 0.936 | 27.17 | 12500 | 3.5428 | 0.5556 | 0.5542 |
+ | 0.9116 | 28.26 | 13000 | 3.6023 | 0.5532 | 0.5509 |
+ | 0.8868 | 29.35 | 13500 | 3.5292 | 0.5579 | 0.5581 |
+ | 0.8733 | 30.43 | 14000 | 3.4206 | 0.5594 | 0.5589 |
+ | 0.8538 | 31.52 | 14500 | 3.4417 | 0.5594 | 0.5592 |
+ | 0.8289 | 32.61 | 15000 | 3.4970 | 0.5579 | 0.5584 |
+ | 0.823 | 33.7 | 15500 | 3.4860 | 0.5625 | 0.5617 |
+ | 0.7992 | 34.78 | 16000 | 3.5193 | 0.5671 | 0.5659 |
+ | 0.7974 | 35.87 | 16500 | 3.3709 | 0.5490 | 0.5497 |
+ | 0.7775 | 36.96 | 17000 | 3.3854 | 0.5706 | 0.5720 |
+ | 0.7691 | 38.04 | 17500 | 3.3827 | 0.5698 | 0.5700 |
+ | 0.758 | 39.13 | 18000 | 3.4608 | 0.5818 | 0.5818 |
+ | 0.757 | 40.22 | 18500 | 3.3860 | 0.5683 | 0.5681 |
+ | 0.7481 | 41.3 | 19000 | 3.3757 | 0.5687 | 0.5686 |
+ | 0.7387 | 42.39 | 19500 | 3.4830 | 0.5714 | 0.5707 |
+ | 0.7276 | 43.48 | 20000 | 3.3942 | 0.5617 | 0.5611 |
+ | 0.7279 | 44.57 | 20500 | 3.3357 | 0.5725 | 0.5726 |
+ | 0.7127 | 45.65 | 21000 | 3.3856 | 0.5521 | 0.5523 |
+ | 0.7148 | 46.74 | 21500 | 3.4401 | 0.5660 | 0.5673 |
+ | 0.7219 | 47.83 | 22000 | 3.4684 | 0.5629 | 0.5627 |
+ | 0.7005 | 48.91 | 22500 | 3.3840 | 0.5625 | 0.5631 |
+ | 0.7114 | 50.0 | 23000 | 3.3760 | 0.5586 | 0.5590 |
+
+
+ ### Framework versions
+
+ - Transformers 4.33.3
+ - Pytorch 2.1.1+cu121
+ - Datasets 2.14.5
+ - Tokenizers 0.13.3
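The card above lists hyperparameters and metrics but no usage snippet. Below is a minimal inference sketch, not part of this commit, assuming the repository id matches the model-index name. Since this commit contains no tokenizer files, the sketch falls back to the stock `xlm-roberta-base` tokenizer, which the 250002-token vocabulary in config.json suggests is compatible; the custom `XLMRobertaForSequenceClassificationKD` head named in config.json will likely load as the standard `XLMRobertaForSequenceClassification` through the Auto classes.

```python
# Minimal sketch (assumptions noted above): plain sentiment inference with
# the distilled checkpoint from this commit.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "haryoaw/scenario-KD-PO-CDF-ALL-D2_data-cardiffnlp_tweet_sentiment_multilingual_all_delta"  # assumed repo id

# Tokenizer files are not in this commit; xlm-roberta-base shares the 250002-token vocab.
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

inputs = tokenizer("I really enjoyed this movie!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(dim=-1).item()
# config.json only defines generic LABEL_0..LABEL_2; map them to sentiment
# classes by checking the tweet_sentiment_multilingual label order yourself.
print(model.config.id2label[pred], logits.softmax(dim=-1).tolist())
```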
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "haryoaw/scenario-TCR_data-cardiffnlp_tweet_sentiment_multilingual_all_a",
+   "architectures": [
+     "XLMRobertaForSequenceClassificationKD"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 384,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.3",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
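This config describes a compact XLM-R student: 6 hidden layers, hidden size 384, intermediate size 1536, 12 attention heads, the full 250002-token XLM-R vocabulary, and a 3-way single-label classification head. A quick sanity check, sketched below under the assumption that only the committed config.json is available locally, shows this works out to roughly 107M parameters, most of them in the embedding table, which at float32 matches the ~429 MB pytorch_model.bin recorded in the next file.

```python
# Sketch: rebuild the architecture from the committed config (random weights)
# and confirm its float32 footprint roughly matches the LFS size below.
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained("config.json")  # path to the file from this commit
# model_type is "xlm-roberta", so the Auto class builds the standard
# XLMRobertaForSequenceClassification rather than the custom KD variant.
model = AutoModelForSequenceClassification.from_config(config)

n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.1f}M parameters, ~{n_params * 4 / 1e6:.0f} MB at fp32")
# Expect roughly 107M parameters (~96M in the 250002 x 384 embedding table),
# i.e. ~428 MB -- consistent with pytorch_model.bin at 429,199,798 bytes.
```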
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52651a8396429e639ea99c381a5d9200ef9fee9ee71416938557e6515caa51ba
+ size 429199798
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf2446fd202457883c94d43ee74090ce1dd6f516a3b6cb4e2195a9cabf5416ec
+ size 4664
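Both pytorch_model.bin and training_args.bin are committed as Git LFS pointer files (the version/oid/size triplets shown in the diffs), not as raw payloads. The sketch below, again assuming the repository id from the model-index name and a transformers install close to the 4.33.3 used for training, resolves the pointers with huggingface_hub and peeks at the pickled TrainingArguments.

```python
# Sketch: resolve the LFS pointers to real files and inspect training_args.bin.
# The repo id is an assumption taken from the model-index name in the README.
import torch
from huggingface_hub import hf_hub_download

repo_id = "haryoaw/scenario-KD-PO-CDF-ALL-D2_data-cardiffnlp_tweet_sentiment_multilingual_all_delta"

weights_path = hf_hub_download(repo_id=repo_id, filename="pytorch_model.bin")
args_path = hf_hub_download(repo_id=repo_id, filename="training_args.bin")

# training_args.bin is a pickled transformers.TrainingArguments object;
# torch.load with weights_only=False executes pickle code, so only do this
# for repositories you trust and with a compatible transformers version.
training_args = torch.load(args_path, map_location="cpu", weights_only=False)
print(training_args.learning_rate, training_args.num_train_epochs, training_args.seed)
print(weights_path)  # local cache path of the ~429 MB checkpoint
```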