Commit 66ca028
Your Name committed
Parent(s): 712fb6f
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,76 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - glue
+ metrics:
+ - matthews_correlation
+ model_index:
+ - name: distilbert-base-uncased-finetuned-cola
+   results:
+   - task:
+       name: Text Classification
+       type: text-classification
+     dataset:
+       name: glue
+       type: glue
+       args: cola
+     metric:
+       name: Matthews Correlation
+       type: matthews_correlation
+       value: 0.5055835457225941
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # distilbert-base-uncased-finetuned-cola
+
+ This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.9001
+ - Matthews Correlation: 0.5056
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------------:|
+ | 0.5237        | 1.0   | 535  | 0.5217          | 0.4496               |
+ | 0.3448        | 2.0   | 1070 | 0.5551          | 0.4965               |
+ | 0.2359        | 3.0   | 1605 | 0.6753          | 0.4945               |
+ | 0.1683        | 4.0   | 2140 | 0.7892          | 0.5040               |
+ | 0.1254        | 5.0   | 2675 | 0.9001          | 0.5056               |
+
+
+ ### Framework versions
+
+ - Transformers 4.9.1
+ - Pytorch 1.9.0+cu102
+ - Datasets 1.11.0
+ - Tokenizers 0.10.3
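The card above lists everything needed to approximate the run. Below is a minimal reproduction sketch, not the author's original script: the GLUE/CoLA column names are the standard ones, and `output_dir` and `evaluation_strategy` are assumptions (the card only reports per-epoch results).

```python
# Minimal reproduction sketch based on the hyperparameters in the card above.
from datasets import load_dataset, load_metric
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

raw = load_dataset("glue", "cola")
metric = load_metric("glue", "cola")  # reports matthews_correlation
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")

def tokenize(batch):
    # CoLA examples have a single "sentence" column
    return tokenizer(batch["sentence"], truncation=True)

encoded = raw.map(tokenize, batched=True)
model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=2
)

args = TrainingArguments(
    output_dir="distilbert-base-uncased-finetuned-cola",  # assumed
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    num_train_epochs=5,
    lr_scheduler_type="linear",  # the Adam betas/epsilon above are the defaults
    evaluation_strategy="epoch",  # assumed from the per-epoch results table
)

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    return metric.compute(predictions=logits.argmax(-1), references=labels)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=encoded["train"],
    eval_dataset=encoded["validation"],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
trainer.train()
```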
config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.9.1",
+   "vocab_size": 30522
+ }
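This config pins the architecture (`DistilBertForSequenceClassification`, 6 layers, 12 heads, dim 768). A small sketch of how it is consumed, assuming the repository has been cloned locally (the full hub id of this repo is not shown in the diff):

```python
# Sketch: how config.json is consumed. A local clone path is assumed.
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained(".")  # reads ./config.json
print(config.model_type, config.n_layers, config.n_heads)  # distilbert 6 12
model = AutoModelForSequenceClassification.from_config(config)  # random weights
# from_pretrained(".") would instead load the fine-tuned weights from pytorch_model.bin
```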
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:451ea667198b9042ac9978783de6a3bbd6249de708346d14ed1009d80dda18bb
+ size 267860081
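These three lines are a Git LFS pointer, not the weights themselves; the real 267 MB file is fetched by `git lfs` or the hub download tooling. A quick sketch for verifying a downloaded copy against the pointer's `oid` and `size` fields (the local path is assumed):

```python
# Verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

path = "pytorch_model.bin"  # assumed local path
assert os.path.getsize(path) == 267860081

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == "451ea667198b9042ac9978783de6a3bbd6249de708346d14ed1009d80dda18bb"
```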
runs/Aug05_05-27-00_f3f89bd6c7d9/1628141235.261187/events.out.tfevents.1628141235.f3f89bd6c7d9.62.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df2ed3461030fa60c2848d44714c14763a7c0ffeec6fddd0711511550a3a9368
+ size 4221
runs/Aug05_05-27-00_f3f89bd6c7d9/events.out.tfevents.1628141235.f3f89bd6c7d9.62.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ddbc8a8497b36b1fa26d553a7a3ac4675989e9593a9f7ce66ffc2892259b6e75
+ size 5681
runs/Aug05_05-27-00_f3f89bd6c7d9/events.out.tfevents.1628141403.f3f89bd6c7d9.62.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c62f80dfb1aedea4c39b414c2e43349f1afe5ec882ab0d67fdf00bb731b5f095
+ size 375
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilbert-base-uncased", "tokenizer_class": "DistilBertTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7989ee6e1e9bde06d9c0020d53ad87f4df1cc8612118b31f93079c29b055ae19
+ size 2671
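training_args.bin is the pickled `TrainingArguments` object the Trainer saves alongside checkpoints; it can be inspected directly, assuming `transformers` is installed so the pickle resolves:

```python
# Sketch: inspecting training_args.bin. Requires transformers to be importable
# (the file is a pickled TrainingArguments); on newer PyTorch, pass
# weights_only=False since this is a pickle, not a tensor file.
import torch

args = torch.load("training_args.bin")
print(args.learning_rate, args.num_train_epochs)  # 2e-05 5.0
```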
vocab.txt ADDED
The diff for this file is too large to render. See raw diff