vjsyong committed
Commit ea22528
1 Parent(s): 3f60480

End of training

Files changed (5):
  1. README.md +21 -21
  2. config.json +1 -1
  3. tf_model.h5 +1 -1
  4. tokenizer.json +2 -2
  5. tokenizer_config.json +1 -1
README.md CHANGED
@@ -14,9 +14,9 @@ probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Train Loss: 0.2634
-- Validation Loss: 0.4040
-- Train Accuracy: 0.8125
+- Train Loss: 0.0140
+- Validation Loss: 0.4773
+- Train Accuracy: 0.8958
 - Epoch: 13
 
 ## Model description
@@ -43,25 +43,25 @@ The following hyperparameters were used during training:
 
 | Train Loss | Validation Loss | Train Accuracy | Epoch |
 |:----------:|:---------------:|:--------------:|:-----:|
-| 0.7133     | 0.6935          | 0.5            | 0     |
-| 0.6888     | 0.6926          | 0.5            | 1     |
-| 0.7065     | 0.6924          | 0.5            | 2     |
-| 0.6875     | 0.6922          | 0.5            | 3     |
-| 0.6950     | 0.6918          | 0.5            | 4     |
-| 0.6945     | 0.6915          | 0.5            | 5     |
-| 0.6865     | 0.6899          | 0.7292         | 6     |
-| 0.6838     | 0.6877          | 0.7708         | 7     |
-| 0.6909     | 0.6825          | 0.8542         | 8     |
-| 0.6786     | 0.6691          | 0.8333         | 9     |
-| 0.5574     | 0.4069          | 0.8542         | 10    |
-| 0.4274     | 0.8131          | 0.6042         | 11    |
-| 0.4728     | 0.3997          | 0.7708         | 12    |
-| 0.2634     | 0.4040          | 0.8125         | 13    |
+| 0.1762     | 0.3984          | 0.875          | 0     |
+| 0.1873     | 0.3250          | 0.8542         | 1     |
+| 0.1339     | 0.4448          | 0.875          | 2     |
+| 0.0316     | 0.4015          | 0.8958         | 3     |
+| 0.0226     | 0.4410          | 0.875          | 4     |
+| 0.0166     | 0.4586          | 0.8958         | 5     |
+| 0.0157     | 0.4710          | 0.8958         | 6     |
+| 0.0113     | 0.4772          | 0.8958         | 7     |
+| 0.0159     | 0.4773          | 0.8958         | 8     |
+| 0.0105     | 0.4773          | 0.8958         | 9     |
+| 0.0119     | 0.4773          | 0.8958         | 10    |
+| 0.0120     | 0.4773          | 0.8958         | 11    |
+| 0.0135     | 0.4773          | 0.8958         | 12    |
+| 0.0140     | 0.4773          | 0.8958         | 13    |
 
 
 ### Framework versions
 
-- Transformers 4.27.3
-- TensorFlow 2.10.0
-- Datasets 2.10.1
-- Tokenizers 0.13.2
+- Transformers 4.28.1
+- TensorFlow 2.10.1
+- Datasets 2.11.0
+- Tokenizers 0.13.3
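
The README changes describe a further-trained TF checkpoint whose validation loss settles at 0.4773. A minimal loading sketch follows, assuming the checkpoint carries a sequence-classification head (the card does not name the task) and using a placeholder repo id:

```python
# Minimal loading sketch. Assumptions not confirmed by this commit:
# the head is sequence classification, and "vjsyong/<repo-name>" is a
# placeholder for the real repo id.
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

repo_id = "vjsyong/<repo-name>"  # hypothetical; the diff does not name the repo

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = TFAutoModelForSequenceClassification.from_pretrained(repo_id)

inputs = tokenizer("An example sentence to score.", return_tensors="tf")
logits = model(**inputs).logits            # shape: (1, num_labels)
print(int(tf.argmax(logits, axis=-1)[0]))  # predicted label id
```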
config.json CHANGED
@@ -28,7 +28,7 @@
   "output_past": true,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
-  "transformers_version": "4.27.3",
+  "transformers_version": "4.28.1",
   "type_vocab_size": 1,
   "use_cache": true,
   "vocab_size": 250002
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:da391a6a0050764bee47bcb14079ab9a07d545e5c33b5eb23ff52b5a91f4e74e
+oid sha256:462b5892b19e7228c29f67ff27711984c5c69d8df7a8b4007a02f1691fbac3ce
 size 1112470336
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:62c24cdc13d4c9952d63718d6c9fa4c287974249e16b7ade6d5a85e7bbb75626
-size 17082660
+oid sha256:f2c509a525eb51aebb33fb59c24ee923c1d4c1db23c3ae81fe05ccf354084f7b
+size 17082758
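
Both binary files are Git LFS pointers, so the diff only swaps the sha256 oid (and, for tokenizer.json, the size). After downloading the actual blobs you can verify them against the pointers; a sketch, assuming the files sit in the current directory:

```python
# Verify a downloaded blob against its LFS pointer's sha256 oid.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

# oids taken from the "+" lines above
assert sha256_of("tf_model.h5") == "462b5892b19e7228c29f67ff27711984c5c69d8df7a8b4007a02f1691fbac3ce"
assert sha256_of("tokenizer.json") == "f2c509a525eb51aebb33fb59c24ee923c1d4c1db23c3ae81fe05ccf354084f7b"
```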
tokenizer_config.json CHANGED
@@ -1,5 +1,6 @@
 {
   "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",
   "eos_token": "</s>",
   "mask_token": {
@@ -13,7 +14,6 @@
   "model_max_length": 512,
   "pad_token": "<pad>",
   "sep_token": "</s>",
-  "special_tokens_map_file": null,
   "tokenizer_class": "XLMRobertaTokenizer",
   "unk_token": "<unk>"
 }
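
The tokenizer_config.json change adds the clean_up_tokenization_spaces flag (newer Transformers versions serialize it explicitly) and drops the legacy special_tokens_map_file entry. A quick sketch of what the flag controls at decode time, with the same placeholder repo id:

```python
# clean_up_tokenization_spaces controls whether decode() removes spaces
# before punctuation and similar detokenization artifacts.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("vjsyong/<repo-name>")  # hypothetical repo id
ids = tok("Hello, world!")["input_ids"]
print(tok.decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=True))
print(tok.decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
```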