markusleonardo committed
Commit 846bf5d
1 Parent(s): c283b88

End of training

README.md ADDED
@@ -0,0 +1,64 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: distilbert/distilroberta-base
+ tags:
+ - generated_from_trainer
+ metrics:
+ - f1
+ model-index:
+ - name: TrainedModel
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # TrainedModel
+
+ This model is a fine-tuned version of [distilbert/distilroberta-base](https://huggingface.co/distilbert/distilroberta-base) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.2843
+ - F1: 0.9032
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 64
+ - eval_batch_size: 8
+ - seed: 10
+ - optimizer: adamw_torch with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 1000
+ - num_epochs: 3
+
+ ### Training results
+
+ | Training Loss | Epoch | Step  | Validation Loss | F1     |
+ |:-------------:|:-----:|:-----:|:---------------:|:------:|
+ | 0.4601        | 1.0   | 8584  | 0.3070          | 0.8870 |
+ | 0.3303        | 2.0   | 17168 | 0.2857          | 0.9000 |
+ | 0.2862        | 3.0   | 25752 | 0.2843          | 0.9032 |
+
+ ### Framework versions
+
+ - Transformers 4.47.1
+ - Pytorch 2.5.1+cu121
+ - Datasets 3.2.0
+ - Tokenizers 0.21.0
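For reference, here is a minimal sketch of how the hyperparameters listed above map onto `transformers.TrainingArguments`. The output directory name and the dataset wiring are assumptions, since the card does not name the training data.

```python
# A minimal sketch matching the hyperparameters in the card above.
# The dataset is unknown, so train/eval dataset wiring is left as a placeholder.
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

model_name = "distilbert/distilroberta-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)

args = TrainingArguments(
    output_dir="TrainedModel",         # assumed output directory
    learning_rate=2e-5,                # learning_rate
    per_device_train_batch_size=64,    # train_batch_size
    per_device_eval_batch_size=8,      # eval_batch_size
    seed=10,                           # seed
    optim="adamw_torch",               # adamw_torch with default betas/epsilon
    lr_scheduler_type="linear",        # lr_scheduler_type
    warmup_steps=1000,                 # lr_scheduler_warmup_steps
    num_train_epochs=3,                # num_epochs
)

# trainer = Trainer(model=model, args=args,
#                   train_dataset=..., eval_dataset=...)  # dataset not named in the card
# trainer.train()
```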
config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "_name_or_path": "distilbert/distilroberta-base",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "entailment",
+     "1": "neutral",
+     "2": "contradiction"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "contradiction": 2,
+     "entailment": 0,
+     "neutral": 1
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.1",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
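The `id2label` block above shows this is a three-way NLI-style classifier (entailment / neutral / contradiction). A minimal inference sketch, assuming the model is published under the hypothetical repo id `markusleonardo/TrainedModel`:

```python
# Inference sketch against the config above. The repo id is an assumption
# inferred from the commit author and model name; substitute the real Hub path.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "markusleonardo/TrainedModel"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

# NLI-style input: premise/hypothesis encoded as a sequence pair.
inputs = tokenizer("A man is playing guitar.", "A person makes music.",
                   return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# id2label maps 0/1/2 to entailment/neutral/contradiction per config.json.
print(model.config.id2label[logits.argmax(dim=-1).item()])
```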
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c4eb8a38866da359f15cfd3926af7e5fde1f357aad68e25e4aa0722f4c6391e
+ size 328495356
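The three lines above are a Git LFS pointer, not the weights themselves. A small sketch of resolving the actual file via `huggingface_hub` and checking it against the pointer's oid, again using the hypothetical repo id:

```python
import hashlib
from huggingface_hub import hf_hub_download

# Resolve the LFS pointer to the real ~328 MB safetensors blob.
path = hf_hub_download(repo_id="markusleonardo/TrainedModel",  # hypothetical repo id
                       filename="model.safetensors")

# The sha256 digest should match the oid recorded in the pointer file.
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)
print(sha256.hexdigest())  # expected: 2c4eb8a38866... per the pointer above
```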
runs/Dec24_01-52-52_a67fdec5a019/events.out.tfevents.1735005174.a67fdec5a019.2726.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33ed76f66215eba84f39ef92f3ac7ee0d34457b74562e324f4dd759bb6e93e74
+ size 7201
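This events file carries the TensorBoard scalars behind the results table in the README. A sketch of inspecting it locally with the `tensorboard` package; the local path and tag names are assumptions:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point at the run directory containing the downloaded events file (assumed path).
ea = EventAccumulator("runs/Dec24_01-52-52_a67fdec5a019")
ea.Reload()

print(ea.Tags()["scalars"])          # list of logged scalar tags
for event in ea.Scalars("eval/f1"):  # tag name is an assumption
    print(event.step, event.value)
```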
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e3c9fe93014484bc1091a6119df70533945f2b909e9bf05ff4550a3f504c2ac
+ size 5304