joheras committed on
Commit 99ea228
1 Parent(s): e89721f

Training complete

Files changed (5):
  1. README.md +12 -11
  2. config.json +1 -1
  3. model.safetensors +3 -0
  4. tokenizer_config.json +43 -2
  5. training_args.bin +2 -2
README.md CHANGED
@@ -1,5 +1,6 @@
 ---
 license: apache-2.0
+base_model: bert-base-uncased
 tags:
 - classification
 - generated_from_trainer
@@ -17,12 +18,12 @@ model-index:
       name: poem_sentiment
       type: poem_sentiment
       config: default
-      split: train
+      split: test
       args: default
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.9038461538461539
+      value: 0.8461538461538461
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -32,8 +33,8 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the poem_sentiment dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.5088
-- Accuracy: 0.9038
+- Loss: 0.5423
+- Accuracy: 0.8462
 
 ## Model description
 
@@ -64,14 +65,14 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:--------:|
-| No log        | 1.0   | 112  | 0.4324          | 0.8654   |
-| No log        | 2.0   | 224  | 0.4070          | 0.875    |
-| No log        | 3.0   | 336  | 0.5088          | 0.9038   |
+| No log        | 1.0   | 112  | 0.4428          | 0.8558   |
+| No log        | 2.0   | 224  | 0.4875          | 0.8462   |
+| No log        | 3.0   | 336  | 0.5423          | 0.8462   |
 
 
 ### Framework versions
 
-- Transformers 4.25.1
-- Pytorch 1.13.0+cu116
-- Datasets 2.8.0
-- Tokenizers 0.13.2
+- Transformers 4.36.0
+- Pytorch 2.1.0+cu118
+- Datasets 2.15.0
+- Tokenizers 0.15.0
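
For context, the updated card describes a sentiment classifier fine-tuned from bert-base-uncased. A minimal loading sketch with the transformers pipeline API is shown below; the repository id is a placeholder (the commit view does not name the repo), and the printed label names depend on the id2label mapping in config.json, which is not shown in this diff.

```python
# Sketch: load the fine-tuned checkpoint for text classification.
# "joheras/your-model-id" is a placeholder repo id, not taken from the commit.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="joheras/your-model-id",  # replace with the actual user/model id
)

# Output is a list of {"label": ..., "score": ...} dicts.
print(classifier("O, for a draught of vintage! that hath been cool'd a long age"))
```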
config.json CHANGED
@@ -32,7 +32,7 @@
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
   "torch_dtype": "float32",
-  "transformers_version": "4.25.1",
+  "transformers_version": "4.36.0",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 30522
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:328bf65bb5ac913696ee0f18d6d2d7de8ed00c30885f859f635cf3f4cfecaf20
+size 437964800
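
The three added lines are a Git LFS pointer rather than the weights themselves: the ~438 MB model.safetensors blob is stored out of band and identified by the SHA-256 oid above. A small verification sketch, assuming the file has already been downloaded to the local path given:

```python
# Sketch: verify a downloaded model.safetensors against the Git LFS pointer above.
# The local path is an assumption; point it at wherever the file was saved.
import hashlib
import os

path = "model.safetensors"  # hypothetical local path
expected_oid = "328bf65bb5ac913696ee0f18d6d2d7de8ed00c30885f859f635cf3f4cfecaf20"
expected_size = 437964800

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pointer matches local file")
```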
tokenizer_config.json CHANGED
@@ -1,12 +1,53 @@
 {
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_lower_case": true,
   "mask_token": "[MASK]",
   "model_max_length": 512,
-  "name_or_path": "bert-base-uncased",
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "special_tokens_map_file": null,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:59d5d427100ac83edb81a2fccd0a501cb9738ae661b94d8c7f4ead59cd604cb2
-size 3451
+oid sha256:d0249d7b6befd5fce24b1a96b0ab64012afded28c91f6608bd2f2fe4d7985485
+size 4728