dstefa committed on
Commit
1cda6e9
1 Parent(s): a1d33db

End of training

Files changed (5)
  1. README.md +15 -15
  2. merges.txt +1 -1
  3. pytorch_model.bin +1 -1
  4. tokenizer.json +4 -5
  5. tokenizer_config.json +0 -42
README.md CHANGED
@@ -20,11 +20,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.0389
-- Accuracy: 0.9938
-- F1: 0.9938
-- Precision: 0.9938
-- Recall: 0.9938
+- Loss: 0.1800
+- Accuracy: 0.9647
+- F1: 0.9647
+- Precision: 0.9647
+- Recall: 0.9647
 
 ## Model description
 
@@ -54,18 +54,18 @@ The following hyperparameters were used during training:
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall |
-|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:|
-| 0.2345 | 1.0 | 160 | 0.1980 | 0.9437 | 0.9437 | 0.9449 | 0.9437 |
-| 0.2676 | 2.0 | 320 | 0.1086 | 0.9844 | 0.9844 | 0.9848 | 0.9844 |
-| 0.0393 | 3.0 | 480 | 0.1011 | 0.9812 | 0.9812 | 0.9816 | 0.9812 |
-| 0.1025 | 4.0 | 640 | 0.0389 | 0.9938 | 0.9938 | 0.9938 | 0.9938 |
-| 0.0004 | 5.0 | 800 | 0.0654 | 0.9875 | 0.9875 | 0.9876 | 0.9875 |
+| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall |
+|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:---------:|:------:|
+| 0.704 | 1.0 | 8000 | 0.6933 | 0.5 | 0.3333 | 0.25 | 0.5 |
+| 0.6926 | 2.0 | 16000 | 0.6980 | 0.5 | 0.3333 | 0.25 | 0.5 |
+| 0.0099 | 3.0 | 24000 | 0.1800 | 0.9647 | 0.9647 | 0.9647 | 0.9647 |
+| 0.2727 | 4.0 | 32000 | 0.2243 | 0.9526 | 0.9526 | 0.9527 | 0.9526 |
+| 0.0618 | 5.0 | 40000 | 0.2128 | 0.9536 | 0.9536 | 0.9546 | 0.9536 |
 
 
 ### Framework versions
 
-- Transformers 4.35.2
+- Transformers 4.32.1
 - Pytorch 2.1.0+cu121
-- Datasets 2.16.1
-- Tokenizers 0.15.0
+- Datasets 2.12.0
+- Tokenizers 0.13.2
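The updated model card still describes a roberta-base fine-tune for sequence classification, now reporting 0.9647 accuracy/F1/precision/recall at an eval loss of 0.1800. A minimal usage sketch, assuming a local clone of this repository (the hub repo id is not stated in the diff) and a standard `AutoModelForSequenceClassification` checkpoint:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Assumption: a local clone of this repository; substitute the hub repo id if known.
model_dir = "./"

tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSequenceClassification.from_pretrained(model_dir)
model.eval()

inputs = tokenizer("Example sentence to classify.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.softmax(dim=-1))
```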
merges.txt CHANGED
@@ -1,4 +1,4 @@
-#version: 0.2
+#version: 0.2 - Trained by `huggingface/tokenizers`
 Ġ t
 Ġ a
 h e
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7a7dcfb9f9697db932789cbf13d196b36c62a8f16deddeed30f780630ae86b58
+oid sha256:485e39cb2e356fb2d36785bccce13b83b6bd307b1f091f5c744b507188240327
 size 498658094
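pytorch_model.bin is tracked with Git LFS, so only the pointer's sha256 oid changes while the size stays identical. A minimal sketch for verifying that a downloaded copy of the weights matches the new pointer (the local path is an assumption):

```python
import hashlib
import os

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so large checkpoints need not fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Values copied from the new LFS pointer above.
expected_oid = "485e39cb2e356fb2d36785bccce13b83b6bd307b1f091f5c744b507188240327"
expected_size = 498658094

path = "pytorch_model.bin"  # assumed local download path
assert os.path.getsize(path) == expected_size, "size does not match the LFS pointer"
assert sha256_of(path) == expected_oid, "sha256 does not match the LFS pointer"
print("pytorch_model.bin matches the LFS pointer")
```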
tokenizer.json CHANGED
@@ -21,7 +21,7 @@
     "single_word": false,
     "lstrip": false,
     "rstrip": false,
-    "normalized": true,
+    "normalized": false,
     "special": true
   },
   {
@@ -30,7 +30,7 @@
     "single_word": false,
     "lstrip": false,
     "rstrip": false,
-    "normalized": true,
+    "normalized": false,
     "special": true
   },
   {
@@ -39,7 +39,7 @@
     "single_word": false,
     "lstrip": false,
     "rstrip": false,
-    "normalized": true,
+    "normalized": false,
     "special": true
   },
   {
@@ -48,7 +48,7 @@
     "single_word": false,
     "lstrip": false,
     "rstrip": false,
-    "normalized": true,
+    "normalized": false,
     "special": true
   },
   {
@@ -94,7 +94,6 @@
   "continuing_subword_prefix": "",
   "end_of_word_suffix": "",
   "fuse_unk": false,
-  "byte_fallback": false,
   "vocab": {
     "<s>": 0,
     "<pad>": 1,
tokenizer_config.json CHANGED
@@ -1,47 +1,5 @@
 {
   "add_prefix_space": false,
-  "added_tokens_decoder": {
-    "0": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "1": {
-      "content": "<pad>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "2": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "3": {
-      "content": "<unk>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "50264": {
-      "content": "<mask>",
-      "lstrip": true,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    }
-  },
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",