danlou committed
Commit dadcd61
1 Parent(s): 944b024
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ license: mit
+ tags:
+ - generated_from_trainer
+ datasets:
+ - commonsense_qa
+ metrics:
+ - accuracy
+ model_index:
+ - name: roberta-large-finetuned-csqa
+   results:
+   - dataset:
+       name: commonsense_qa
+       type: commonsense_qa
+       args: default
+     metric:
+       name: Accuracy
+       type: accuracy
+       value: 0.7330057621002197
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # roberta-large-finetuned-csqa
+
+ This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the commonsense_qa dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.9146
+ - Accuracy: 0.7330
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 1.3903        | 1.0   | 609  | 0.8845          | 0.6642   |
+ | 0.8939        | 2.0   | 1218 | 0.7054          | 0.7281   |
+ | 0.6163        | 3.0   | 1827 | 0.7452          | 0.7314   |
+ | 0.4245        | 4.0   | 2436 | 0.8369          | 0.7355   |
+ | 0.3258        | 5.0   | 3045 | 0.9146          | 0.7330   |
+
+
+ ### Framework versions
+
+ - Transformers 4.9.0
+ - Pytorch 1.9.0
+ - Datasets 1.10.2
+ - Tokenizers 0.10.3
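Since the card's usage sections are still empty, here is a minimal inference sketch for a multiple-choice checkpoint like this one. The repo id `danlou/roberta-large-finetuned-csqa` and the example question are assumptions (neither appears in the diff); only the `RobertaForMultipleChoice` architecture is confirmed by config.json below.

```python
# Minimal inference sketch; repo id and question are assumed, not from the diff.
import torch
from transformers import AutoModelForMultipleChoice, AutoTokenizer

model_name = "danlou/roberta-large-finetuned-csqa"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForMultipleChoice.from_pretrained(model_name)

# A hypothetical CommonsenseQA-style question with five candidate answers.
question = "Where would you most likely store wine for many years?"
choices = ["closet", "office building", "refrigerator", "wine cellar", "church"]

# Multiple-choice heads expect inputs shaped (batch_size, num_choices, seq_len),
# so encode the question paired with each candidate, then add a batch dimension.
inputs = tokenizer([question] * len(choices), choices,
                   return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in inputs.items()}

with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, num_choices)
print(choices[logits.argmax(dim=-1).item()])
```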
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "roberta-large",
+   "architectures": [
+     "RobertaForMultipleChoice"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.9.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
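The `architectures` field in this config is what tells `from_pretrained` which head class to build. A quick sketch of inspecting it, again assuming the repo id used above:

```python
# Sketch of how the committed config drives model construction (repo id assumed).
from transformers import AutoConfig, AutoModelForMultipleChoice

config = AutoConfig.from_pretrained("danlou/roberta-large-finetuned-csqa")
print(config.architectures)      # ["RobertaForMultipleChoice"]
print(config.hidden_size)        # 1024 (roberta-large)
print(config.num_hidden_layers)  # 24

# from_config builds the architecture with random weights; from_pretrained
# is what actually loads pytorch_model.bin.
model = AutoModelForMultipleChoice.from_config(config)
```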
merges.txt ADDED
The diff for this file is too large to render. See raw diff
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:122dedf530fca2c53e39f880bccdbffdfe1dd1bec501f71037af0a475113641c
+ size 1421607277
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "roberta-large", "tokenizer_class": "RobertaTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8fd604937978614cbcc0cadc28290de65ae67361747ce157745148eb6028a70d
+ size 2671
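training_args.bin is the Trainer's serialized `TrainingArguments`. A hedged reconstruction of its likely contents, based only on the hyperparameters listed in README.md; `output_dir` and any field not listed there are assumptions:

```python
# Hedged reconstruction of the serialized TrainingArguments, based solely on
# the hyperparameters in README.md; output_dir is a placeholder.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="roberta-large-finetuned-csqa",  # assumed
    learning_rate=1e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=5,
    fp16=True,  # "Native AMP" mixed precision per the README
)
```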
vocab.json ADDED
The diff for this file is too large to render. See raw diff