mrm8488 committed
Commit 9059152
1 parent: 5720b66

Initial commit from mrm8488

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set.
Files changed (50)
  1. README.md +76 -0
  2. checkpoint-1020/config.json +39 -0
  3. checkpoint-1020/merges.txt +0 -0
  4. checkpoint-1020/optimizer.pt +3 -0
  5. checkpoint-1020/pytorch_model.bin +3 -0
  6. checkpoint-1020/rng_state.pth +3 -0
  7. checkpoint-1020/scheduler.pt +3 -0
  8. checkpoint-1020/special_tokens_map.json +1 -0
  9. checkpoint-1020/tokenizer.json +0 -0
  10. checkpoint-1020/tokenizer_config.json +1 -0
  11. checkpoint-1020/trainer_state.json +64 -0
  12. checkpoint-1020/training_args.bin +3 -0
  13. checkpoint-1020/vocab.json +0 -0
  14. checkpoint-1275/config.json +39 -0
  15. checkpoint-1275/merges.txt +0 -0
  16. checkpoint-1275/optimizer.pt +3 -0
  17. checkpoint-1275/pytorch_model.bin +3 -0
  18. checkpoint-1275/rng_state.pth +3 -0
  19. checkpoint-1275/scheduler.pt +3 -0
  20. checkpoint-1275/special_tokens_map.json +1 -0
  21. checkpoint-1275/tokenizer.json +0 -0
  22. checkpoint-1275/tokenizer_config.json +1 -0
  23. checkpoint-1275/trainer_state.json +73 -0
  24. checkpoint-1275/training_args.bin +3 -0
  25. checkpoint-1275/vocab.json +0 -0
  26. checkpoint-255/config.json +39 -0
  27. checkpoint-255/merges.txt +0 -0
  28. checkpoint-255/optimizer.pt +3 -0
  29. checkpoint-255/pytorch_model.bin +3 -0
  30. checkpoint-255/rng_state.pth +3 -0
  31. checkpoint-255/scheduler.pt +3 -0
  32. checkpoint-255/special_tokens_map.json +1 -0
  33. checkpoint-255/tokenizer.json +0 -0
  34. checkpoint-255/tokenizer_config.json +1 -0
  35. checkpoint-255/trainer_state.json +25 -0
  36. checkpoint-255/training_args.bin +3 -0
  37. checkpoint-255/vocab.json +0 -0
  38. checkpoint-510/config.json +39 -0
  39. checkpoint-510/merges.txt +0 -0
  40. checkpoint-510/optimizer.pt +3 -0
  41. checkpoint-510/pytorch_model.bin +3 -0
  42. checkpoint-510/rng_state.pth +3 -0
  43. checkpoint-510/scheduler.pt +3 -0
  44. checkpoint-510/special_tokens_map.json +1 -0
  45. checkpoint-510/tokenizer.json +0 -0
  46. checkpoint-510/tokenizer_config.json +1 -0
  47. checkpoint-510/trainer_state.json +40 -0
  48. checkpoint-510/training_args.bin +3 -0
  49. checkpoint-510/vocab.json +0 -0
  50. checkpoint-765/config.json +39 -0
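The commit lays down a standard `Trainer` output tree: one directory per saved step (255/510/765/1020/1275, i.e. one per epoch), each holding the model, optimizer and scheduler state, RNG state, tokenizer files, and training metadata. A hedged sketch of fetching the whole repo programmatically, assuming it is published under the Hub id `mrm8488/distilRoberta-financial-sentiment` (inferred from the model name; adjust if the repo id differs):

```python
# Sketch: download the full repo (checkpoint folders included) from the Hub.
# The repo id below is an assumption inferred from the model name.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(repo_id="mrm8488/distilRoberta-financial-sentiment")
print(local_dir)  # cache path containing README.md and the checkpoint-* folders
```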
README.md ADDED
@@ -0,0 +1,76 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - financial_phrasebank
+ metrics:
+ - accuracy
+ model-index:
+ - name: distilRoberta-financial-sentiment
+   results:
+   - task:
+       name: Text Classification
+       type: text-classification
+     dataset:
+       name: financial_phrasebank
+       type: financial_phrasebank
+       args: sentences_allagree
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.9823008849557522
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # distilRoberta-financial-sentiment
+
+ This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the financial_phrasebank dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1116
+ - Accuracy: 0.9823
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | No log        | 1.0   | 255  | 0.1670          | 0.9646   |
+ | 0.209         | 2.0   | 510  | 0.2290          | 0.9558   |
+ | 0.209         | 3.0   | 765  | 0.2044          | 0.9558   |
+ | 0.0326        | 4.0   | 1020 | 0.1116          | 0.9823   |
+ | 0.0326        | 5.0   | 1275 | 0.1127          | 0.9779   |
+
+
+ ### Framework versions
+
+ - Transformers 4.10.2
+ - Pytorch 1.9.0+cu102
+ - Datasets 1.12.1
+ - Tokenizers 0.10.3
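To make the card easier to act on, here is a minimal inference sketch. It assumes the Hub id `mrm8488/distilRoberta-financial-sentiment` (inferred from the model name above), and note that the checkpoint only ships the generic `LABEL_0`/`LABEL_1`/`LABEL_2` names, so the mapping to negative/neutral/positive must be checked against the financial_phrasebank label order.

```python
# Hedged usage sketch -- the Hub id below is inferred from the model name
# and may need adjusting; a local clone of this repo works the same way.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="mrm8488/distilRoberta-financial-sentiment",
)

print(classifier("Operating profit rose compared to the same period a year earlier."))
# e.g. [{'label': 'LABEL_2', 'score': 0.99}] -- labels are the generic LABEL_0/1/2
```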
checkpoint-1020/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "distilroberta-base",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.10.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
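This config is identical across all the checkpoint directories: a 6-layer distilled RoBERTa encoder with a 3-way classification head. Any single checkpoint folder is therefore self-contained and loadable on its own; a minimal sketch, assuming `./checkpoint-1020` has been downloaded with its LFS weights:

```python
# Minimal sketch: load one checkpoint directory as a sequence classifier.
from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained("./checkpoint-1020")
print(model.config.num_hidden_layers)  # 6 -- half the depth of roberta-base
print(model.config.id2label)           # {0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}
```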
checkpoint-1020/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1020/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a10d139e099d856113f8f9bec12c657e6d97106685112c3f8f9090244f21ac6
+ size 657026205
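The `.pt`/`.bin`/`.pth` files in this commit are stored through Git LFS, so the diff only shows a three-line pointer: the spec version, the SHA-256 object id, and the byte size (here ~657 MB of Adam optimizer state, roughly twice the ~328 MB of model weights, since Adam keeps two moment tensors per parameter). A small sketch of parsing such a pointer, assuming the path below refers to the pointer file as checked into git:

```python
# Sketch: read the key/value fields of a Git LFS pointer file.
# This parses the 3-line pointer, not the real binary (fetch that via `git lfs pull`).
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = read_lfs_pointer("checkpoint-1020/optimizer.pt")
print(ptr["oid"], ptr["size"])  # sha256:3a10d139... 657026205
```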
checkpoint-1020/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6d24cd7c45f0b65241fd9ff1aa97814eea3ab7bdbf1458248fb9f4b2c817864
+ size 328529005
checkpoint-1020/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:133c6f14a48205cfdb6ebb24164c59ba141f1b3bb6282c1a0f245e16e419918e
+ size 14503
checkpoint-1020/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b5fb5b60622a213ada574e87f31d8d1d9c80a4bff68598fca269c91968c006c
+ size 623
checkpoint-1020/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
checkpoint-1020/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1020/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilroberta-base", "tokenizer_class": "RobertaTokenizer"}
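The tokenizer files pin a stock `RobertaTokenizer` (byte-level BPE) with `model_max_length` of 512 and `add_prefix_space` disabled, unchanged from distilroberta-base. A quick sketch of loading it from the checkpoint directory (local path assumed as before):

```python
# Sketch: the checkpoint's tokenizer is the stock distilroberta-base one.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint-1020")
enc = tokenizer("Net sales increased in the quarter.", truncation=True)
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# <s> ... </s> wrapping, with 'Ġ' marking word-initial BPE pieces
```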
checkpoint-1020/trainer_state.json ADDED
@@ -0,0 +1,64 @@
+ {
+   "best_metric": 0.9823008849557522,
+   "best_model_checkpoint": "/content/drive/MyDrive/distilRoberta-financial-sentiment/checkpoint-1020",
+   "epoch": 4.0,
+   "global_step": 1020,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9646017699115044,
+       "eval_loss": 0.16703279316425323,
+       "eval_runtime": 0.4253,
+       "eval_samples_per_second": 531.427,
+       "eval_steps_per_second": 68.192,
+       "step": 255
+     },
+     {
+       "epoch": 1.96,
+       "learning_rate": 1.215686274509804e-05,
+       "loss": 0.209,
+       "step": 500
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.9557522123893806,
+       "eval_loss": 0.22898824512958527,
+       "eval_runtime": 0.4057,
+       "eval_samples_per_second": 557.077,
+       "eval_steps_per_second": 71.483,
+       "step": 510
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.9557522123893806,
+       "eval_loss": 0.20438142120838165,
+       "eval_runtime": 0.4213,
+       "eval_samples_per_second": 536.442,
+       "eval_steps_per_second": 68.835,
+       "step": 765
+     },
+     {
+       "epoch": 3.92,
+       "learning_rate": 4.313725490196079e-06,
+       "loss": 0.0326,
+       "step": 1000
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.9823008849557522,
+       "eval_loss": 0.11158797889947891,
+       "eval_runtime": 0.4245,
+       "eval_samples_per_second": 532.332,
+       "eval_steps_per_second": 68.308,
+       "step": 1020
+     }
+   ],
+   "max_steps": 1275,
+   "num_train_epochs": 5,
+   "total_flos": 109104889463388.0,
+   "trial_name": null,
+   "trial_params": null
+ }
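`trainer_state.json` is plain JSON, so the evaluation curve and the best checkpoint can be recovered without touching any weights. Note that `best_model_checkpoint` records the Colab-side path (`/content/drive/...`) where training ran, not a path inside this repo. A minimal sketch, assuming a local copy of the file:

```python
# Sketch: extract the eval curve and best checkpoint from a Trainer state file.
import json

with open("checkpoint-1020/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"], state["best_model_checkpoint"])
for entry in state["log_history"]:
    if "eval_accuracy" in entry:  # skip the train-loss-only log entries
        print(entry["step"], entry["eval_accuracy"], entry["eval_loss"])
```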
checkpoint-1020/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee1178219233a39de3467c1d1c9ad2fd1d976e51b9ed6bb5a459131607445eaf
+ size 2735
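`training_args.bin` is a pickled `TrainingArguments` object, so the exact hyperparameters reported in the README can be recovered from any checkpoint. A sketch, assuming a transformers version compatible with the 4.10.2 release that wrote the file:

```python
# Sketch: recover the pickled TrainingArguments from a checkpoint.
# Unpickling requires a transformers install compatible with 4.10.2.
import torch

args = torch.load("checkpoint-1020/training_args.bin")
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
# 2e-05 8 5 -- matching the README's hyperparameter list
```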
checkpoint-1020/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1275/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "distilroberta-base",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.10.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
checkpoint-1275/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1275/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75fcbedc38d2c17d18ac54e29a3a67ef140ebce4c29e1c97100eb85ffa994859
+ size 657026205
checkpoint-1275/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8c9cd771ae142b084f337b48bc6d3ef08344519e7e8b800d65d7581ffd1f6dc
+ size 328529005
checkpoint-1275/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60e92978b58e882b21cd2cb09760e0deb38b0133cae445d92efa6a28baa781a7
+ size 14503
checkpoint-1275/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0b3ce3a7534d3726f9e692752affc1b48524ec0f784701f84651de3aca1e0f0
+ size 623
checkpoint-1275/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
checkpoint-1275/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1275/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilroberta-base", "tokenizer_class": "RobertaTokenizer"}
checkpoint-1275/trainer_state.json ADDED
@@ -0,0 +1,73 @@
+ {
+   "best_metric": 0.9823008849557522,
+   "best_model_checkpoint": "/content/drive/MyDrive/distilRoberta-financial-sentiment/checkpoint-1020",
+   "epoch": 5.0,
+   "global_step": 1275,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9646017699115044,
+       "eval_loss": 0.16703279316425323,
+       "eval_runtime": 0.4253,
+       "eval_samples_per_second": 531.427,
+       "eval_steps_per_second": 68.192,
+       "step": 255
+     },
+     {
+       "epoch": 1.96,
+       "learning_rate": 1.215686274509804e-05,
+       "loss": 0.209,
+       "step": 500
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.9557522123893806,
+       "eval_loss": 0.22898824512958527,
+       "eval_runtime": 0.4057,
+       "eval_samples_per_second": 557.077,
+       "eval_steps_per_second": 71.483,
+       "step": 510
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.9557522123893806,
+       "eval_loss": 0.20438142120838165,
+       "eval_runtime": 0.4213,
+       "eval_samples_per_second": 536.442,
+       "eval_steps_per_second": 68.835,
+       "step": 765
+     },
+     {
+       "epoch": 3.92,
+       "learning_rate": 4.313725490196079e-06,
+       "loss": 0.0326,
+       "step": 1000
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.9823008849557522,
+       "eval_loss": 0.11158797889947891,
+       "eval_runtime": 0.4245,
+       "eval_samples_per_second": 532.332,
+       "eval_steps_per_second": 68.308,
+       "step": 1020
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.9778761061946902,
+       "eval_loss": 0.11265852302312851,
+       "eval_runtime": 0.431,
+       "eval_samples_per_second": 524.402,
+       "eval_steps_per_second": 67.291,
+       "step": 1275
+     }
+   ],
+   "max_steps": 1275,
+   "num_train_epochs": 5,
+   "total_flos": 136563387115644.0,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-1275/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee1178219233a39de3467c1d1c9ad2fd1d976e51b9ed6bb5a459131607445eaf
+ size 2735
checkpoint-1275/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-255/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "distilroberta-base",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.10.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
checkpoint-255/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-255/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:975b1ebc8d6a6c90134f55202316d19b3e80107d206c614021d293fd65c24106
+ size 657026077
checkpoint-255/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4b733235868bb0ec23026fbf9503e44701a09e253c725f4e3c48d9010d9119b
+ size 328529005
checkpoint-255/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a3a992b385bfed8a7947f3349b47e31d3a41012ea7e604ba8930022525ee395
+ size 14503
checkpoint-255/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf9b70b1312ed602ec4c2ce9c481f61d1ea9d352599c28f99624d803ef1abbb2
+ size 623
checkpoint-255/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
checkpoint-255/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-255/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilroberta-base", "tokenizer_class": "RobertaTokenizer"}
checkpoint-255/trainer_state.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "best_metric": 0.9646017699115044,
+   "best_model_checkpoint": "/content/drive/MyDrive/distilRoberta-financial-sentiment/checkpoint-255",
+   "epoch": 1.0,
+   "global_step": 255,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9646017699115044,
+       "eval_loss": 0.16703279316425323,
+       "eval_runtime": 0.4253,
+       "eval_samples_per_second": 531.427,
+       "eval_steps_per_second": 68.192,
+       "step": 255
+     }
+   ],
+   "max_steps": 1275,
+   "num_train_epochs": 5,
+   "total_flos": 27420205611960.0,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-255/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee1178219233a39de3467c1d1c9ad2fd1d976e51b9ed6bb5a459131607445eaf
+ size 2735
checkpoint-255/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-510/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "distilroberta-base",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.10.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
checkpoint-510/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-510/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d8bcdb6b232b75268539ac908094b87c2485cf9f72e43a602596e731db2ac2a
+ size 657026205
checkpoint-510/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d037b8e1adbc44aa7ac023e87cbc070f9e38fbff4760759f77d7908d06af361
+ size 328529005
checkpoint-510/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48a061e86b42b51652875e3d2f31f7c4fa9b52250106d33553f74eb06c82ac04
+ size 14503
checkpoint-510/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fcc0a142708d96432e37e864dacf92fbffed7431324f0234396de039f88c9435
+ size 623
checkpoint-510/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
checkpoint-510/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-510/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilroberta-base", "tokenizer_class": "RobertaTokenizer"}
checkpoint-510/trainer_state.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "best_metric": 0.9646017699115044,
+   "best_model_checkpoint": "/content/drive/MyDrive/distilRoberta-financial-sentiment/checkpoint-255",
+   "epoch": 2.0,
+   "global_step": 510,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9646017699115044,
+       "eval_loss": 0.16703279316425323,
+       "eval_runtime": 0.4253,
+       "eval_samples_per_second": 531.427,
+       "eval_steps_per_second": 68.192,
+       "step": 255
+     },
+     {
+       "epoch": 1.96,
+       "learning_rate": 1.215686274509804e-05,
+       "loss": 0.209,
+       "step": 500
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.9557522123893806,
+       "eval_loss": 0.22898824512958527,
+       "eval_runtime": 0.4057,
+       "eval_samples_per_second": 557.077,
+       "eval_steps_per_second": 71.483,
+       "step": 510
+     }
+   ],
+   "max_steps": 1275,
+   "num_train_epochs": 5,
+   "total_flos": 54781938243468.0,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-510/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee1178219233a39de3467c1d1c9ad2fd1d976e51b9ed6bb5a459131607445eaf
+ size 2735
checkpoint-510/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-765/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "distilroberta-base",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.10.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }