haryoaw committed on
Commit
30d26d4
1 Parent(s): d961c78

Initial Commit

Files changed (4)
  1. README.md +109 -0
  2. config.json +39 -0
  3. pytorch_model.bin +3 -0
  4. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,109 @@
+ ---
+ license: mit
+ base_model: haryoaw/scenario-TCR_data-cardiffnlp_tweet_sentiment_multilingual_all_a
+ tags:
+ - generated_from_trainer
+ datasets:
+ - tweet_sentiment_multilingual
+ metrics:
+ - accuracy
+ - f1
+ model-index:
+ - name: scenario-KD-PO-CDF-ALL-D2_data-cardiffnlp_tweet_sentiment_multilingual_all_gamma
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # scenario-KD-PO-CDF-ALL-D2_data-cardiffnlp_tweet_sentiment_multilingual_all_gamma
+
+ This model is a fine-tuned version of [haryoaw/scenario-TCR_data-cardiffnlp_tweet_sentiment_multilingual_all_a](https://huggingface.co/haryoaw/scenario-TCR_data-cardiffnlp_tweet_sentiment_multilingual_all_a) on the tweet_sentiment_multilingual dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 3.5245
+ - Accuracy: 0.5517
+ - F1: 0.5524
+
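+ A minimal usage sketch, not the author's published example: it assumes the checkpoint loads with the stock `AutoModelForSequenceClassification` head (the config declares a custom `XLMRobertaForSequenceClassificationKD` architecture, so the exact class used in training may differ), that tokenizer files are available (otherwise the XLM-R tokenizer of the base model should be compatible), and that the repository id below, inferred from the model name, is correct.
+
+ ```python
+ # Hedged sketch; repo id inferred from the model name above (assumption).
+ import torch
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ model_id = "haryoaw/scenario-KD-PO-CDF-ALL-D2_data-cardiffnlp_tweet_sentiment_multilingual_all_gamma"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForSequenceClassification.from_pretrained(model_id)
+ model.eval()
+
+ inputs = tokenizer("what a great day!", return_tensors="pt")
+ with torch.no_grad():
+     logits = model(**inputs).logits
+ print(model.config.id2label[logits.argmax(dim=-1).item()])
+ ```
+
+ The labels are the generic `LABEL_0`, `LABEL_1`, `LABEL_2` from `config.json`; mapping them to negative/neutral/positive follows the usual tweet_sentiment_multilingual convention but is not confirmed by this card.
+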
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 88458
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 50
+
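+ As a rough reference, these settings map onto `transformers.TrainingArguments` (Transformers 4.33 argument names) roughly as sketched below; `output_dir` and anything not listed above are illustrative assumptions.
+
+ ```python
+ # Hedged sketch of the listed hyperparameters; not the author's training script.
+ from transformers import TrainingArguments
+
+ training_args = TrainingArguments(
+     output_dir="scenario-KD-PO-CDF-ALL-D2",  # illustrative, not recorded above
+     learning_rate=5e-05,
+     per_device_train_batch_size=32,
+     per_device_eval_batch_size=32,
+     seed=88458,
+     num_train_epochs=50,
+     lr_scheduler_type="linear",
+     adam_beta1=0.9,       # Adam betas/epsilon as listed above
+     adam_beta2=0.999,
+     adam_epsilon=1e-08,
+ )
+ ```
+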
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
+ |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|
+ | 4.8673 | 1.09 | 500 | 4.1044 | 0.4356 | 0.4381 |
+ | 4.0355 | 2.17 | 1000 | 3.8162 | 0.5004 | 0.4829 |
+ | 3.4812 | 3.26 | 1500 | 3.3484 | 0.5312 | 0.5299 |
+ | 3.1323 | 4.35 | 2000 | 3.3401 | 0.5502 | 0.5500 |
+ | 2.7632 | 5.43 | 2500 | 3.5126 | 0.5471 | 0.5441 |
+ | 2.5101 | 6.52 | 3000 | 3.5161 | 0.5444 | 0.5412 |
+ | 2.3266 | 7.61 | 3500 | 3.6769 | 0.5367 | 0.5263 |
+ | 2.1096 | 8.7 | 4000 | 3.6299 | 0.5513 | 0.5501 |
+ | 1.972 | 9.78 | 4500 | 3.4289 | 0.5432 | 0.5428 |
+ | 1.8345 | 10.87 | 5000 | 3.3890 | 0.5502 | 0.5464 |
+ | 1.711 | 11.96 | 5500 | 3.3365 | 0.5548 | 0.5553 |
+ | 1.6043 | 13.04 | 6000 | 3.4657 | 0.5529 | 0.5527 |
+ | 1.4994 | 14.13 | 6500 | 3.3948 | 0.5494 | 0.5500 |
+ | 1.404 | 15.22 | 7000 | 3.5906 | 0.5529 | 0.5533 |
+ | 1.3423 | 16.3 | 7500 | 3.5538 | 0.5575 | 0.5555 |
+ | 1.2991 | 17.39 | 8000 | 3.5762 | 0.5532 | 0.5539 |
+ | 1.217 | 18.48 | 8500 | 3.6649 | 0.5517 | 0.5518 |
+ | 1.1763 | 19.57 | 9000 | 3.5238 | 0.5513 | 0.5503 |
+ | 1.1249 | 20.65 | 9500 | 3.5218 | 0.5436 | 0.5453 |
+ | 1.0774 | 21.74 | 10000 | 3.7103 | 0.5617 | 0.5622 |
+ | 1.0558 | 22.83 | 10500 | 3.6698 | 0.5567 | 0.5558 |
+ | 1.0036 | 23.91 | 11000 | 3.4754 | 0.5648 | 0.5645 |
+ | 0.9734 | 25.0 | 11500 | 3.5782 | 0.5490 | 0.5483 |
+ | 0.9614 | 26.09 | 12000 | 3.4920 | 0.5586 | 0.5600 |
+ | 0.9221 | 27.17 | 12500 | 3.5416 | 0.5440 | 0.5436 |
+ | 0.905 | 28.26 | 13000 | 3.5065 | 0.5640 | 0.5635 |
+ | 0.8845 | 29.35 | 13500 | 3.6653 | 0.5463 | 0.5464 |
+ | 0.8614 | 30.43 | 14000 | 3.5104 | 0.5583 | 0.5571 |
+ | 0.8414 | 31.52 | 14500 | 3.6002 | 0.5548 | 0.5554 |
+ | 0.8328 | 32.61 | 15000 | 3.5431 | 0.5544 | 0.5527 |
+ | 0.8134 | 33.7 | 15500 | 3.5080 | 0.5590 | 0.5585 |
+ | 0.7973 | 34.78 | 16000 | 3.4150 | 0.5583 | 0.5578 |
+ | 0.7887 | 35.87 | 16500 | 3.6270 | 0.5486 | 0.5502 |
+ | 0.7778 | 36.96 | 17000 | 3.6464 | 0.5494 | 0.5491 |
+ | 0.7662 | 38.04 | 17500 | 3.5100 | 0.5633 | 0.5627 |
+ | 0.7553 | 39.13 | 18000 | 3.5580 | 0.5532 | 0.5537 |
+ | 0.7426 | 40.22 | 18500 | 3.4555 | 0.5594 | 0.5583 |
+ | 0.7494 | 41.3 | 19000 | 3.5871 | 0.5590 | 0.5554 |
+ | 0.7252 | 42.39 | 19500 | 3.4094 | 0.5590 | 0.5595 |
+ | 0.7293 | 43.48 | 20000 | 3.4817 | 0.5656 | 0.5661 |
+ | 0.7103 | 44.57 | 20500 | 3.4964 | 0.5594 | 0.5596 |
+ | 0.718 | 45.65 | 21000 | 3.4770 | 0.5598 | 0.5593 |
+ | 0.7147 | 46.74 | 21500 | 3.4938 | 0.5613 | 0.5616 |
+ | 0.7014 | 47.83 | 22000 | 3.4664 | 0.5571 | 0.5567 |
+ | 0.6991 | 48.91 | 22500 | 3.4357 | 0.5606 | 0.5606 |
+ | 0.6944 | 50.0 | 23000 | 3.5245 | 0.5517 | 0.5524 |
+
+
+ ### Framework versions
+
+ - Transformers 4.33.3
+ - Pytorch 2.1.1+cu121
+ - Datasets 2.14.5
+ - Tokenizers 0.13.3
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "haryoaw/scenario-TCR_data-cardiffnlp_tweet_sentiment_multilingual_all_a",
+   "architectures": [
+     "XLMRobertaForSequenceClassificationKD"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 384,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.3",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
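For quick inspection, a minimal sketch that loads this configuration with `transformers.AutoConfig`; the repository id is inferred from the model card above and is an assumption.

```python
# Hedged sketch: fetch and inspect the committed config (repo id assumed).
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "haryoaw/scenario-KD-PO-CDF-ALL-D2_data-cardiffnlp_tweet_sentiment_multilingual_all_gamma"
)
print(config.model_type)         # xlm-roberta
print(config.num_hidden_layers)  # 6
print(config.hidden_size)        # 384
print(config.num_labels)         # 3
```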
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57b8bd1296b894b046939aba31e91800d75e4cc8c604186cbb32cd8afe7fd8c1
+ size 429199798
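The file above is a Git LFS pointer rather than the weights themselves. A minimal sketch, assuming `pytorch_model.bin` has already been fetched locally (e.g. via `git lfs pull`), for checking it against the pointer's oid and size:

```python
# Hedged sketch: verify a local pytorch_model.bin against the LFS pointer above.
import hashlib
from pathlib import Path

path = Path("pytorch_model.bin")
expected_oid = "57b8bd1296b894b046939aba31e91800d75e4cc8c604186cbb32cd8afe7fd8c1"
expected_size = 429199798

digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert path.stat().st_size == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```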
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fdf5490a682d2bcd1985e3f9a0423abe6398465a0749e153f0e820430652b2b
+ size 4664