LysandreJik committed on
Commit f949bfa · 1 Parent(s): 72a3c1f
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ language:
+ - en
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - glue
+ metrics:
+ - accuracy
+ - f1
+ model_index:
+ - name: testing
+   results:
+   - task:
+       name: Text Classification
+       type: text-classification
+     dataset:
+       name: GLUE MRPC
+       type: glue
+       args: mrpc
+     metric:
+       name: F1
+       type: f1
+       value: 0.8104956268221574
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # testing
+
+ This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MRPC dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6644
+ - Accuracy: 0.6814
+ - F1: 0.8105
+ - Combined Score: 0.7459
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 1
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - training_steps: 10
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.10.0.dev0
+ - Pytorch 1.9.0+cu111
+ - Datasets 1.11.0
+ - Tokenizers 0.10.3
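
As a usage note on the card above: a minimal inference sketch, assuming the files in this commit are cloned to a local directory `./testing` (the card does not state the published repo id), would look like this:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# "./testing" is an assumed local clone of this repository.
tokenizer = AutoTokenizer.from_pretrained("./testing")
model = AutoModelForSequenceClassification.from_pretrained("./testing")

# MRPC is a sentence-pair paraphrase task, so the tokenizer takes two sentences.
inputs = tokenizer(
    "The company posted record profits.",
    "Profits hit an all-time high at the firm.",
    return_tensors="pt",
)
with torch.no_grad():
    logits = model(**inputs).logits
# Softmax over the two labels (not_equivalent, equivalent in GLUE MRPC).
print(logits.softmax(dim=-1))
```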
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+     "epoch": 0.0,
+     "eval_accuracy": 0.6813725490196079,
+     "eval_combined_score": 0.7459340879208827,
+     "eval_f1": 0.8104956268221574,
+     "eval_loss": 0.6643973588943481,
+     "eval_runtime": 1.3324,
+     "eval_samples": 408,
+     "eval_samples_per_second": 306.21,
+     "eval_steps_per_second": 38.276,
+     "train_loss": 0.6649961948394776,
+     "train_runtime": 0.4443,
+     "train_samples": 3668,
+     "train_samples_per_second": 22.507,
+     "train_steps_per_second": 22.507
+ }
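
The `eval_combined_score` above is the arithmetic mean of accuracy and F1, which can be verified directly from this file with nothing but the standard library:

```python
import json

with open("all_results.json") as f:
    results = json.load(f)

# For GLUE MRPC the combined score is the mean of accuracy and F1.
combined = (results["eval_accuracy"] + results["eval_f1"]) / 2
assert abs(combined - results["eval_combined_score"]) < 1e-12
print(f"{combined:.10f}")  # 0.7459340879
```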
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "finetuning_task": "mrpc",
+   "hidden_dim": 3072,
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.10.0.dev0",
+   "vocab_size": 30522
+ }
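
The config records the architecture (6 layers, 12 heads, hidden size 768) and the head type for this checkpoint. A small sketch of how these fields are read back, again assuming a local clone at `./testing`:

```python
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained("./testing")
print(config.model_type, config.n_layers, config.n_heads, config.dim)
# distilbert 6 12 768

# "architectures" tells the Auto classes which head to instantiate, and
# problem_type selects single-label cross-entropy for the classifier.
model = AutoModelForSequenceClassification.from_pretrained("./testing")
print(model.config.problem_type)  # single_label_classification
```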
emissions.csv ADDED
@@ -0,0 +1,2 @@
+ timestamp,experiment_id,project_name,duration,emissions,energy_consumed,country_name,country_iso_code,region,on_cloud,cloud_provider,cloud_region
+ 2021-09-22T14:49:03,22510de0-5f5a-4992-a10f-5f87ce9f9508,codecarbon,3.48142671585083,2.050872917901026e-06,9.743978340031914e-06,United States,USA,new york,N,,
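
The CSV above is codecarbon output: a header row plus one row for this short run. A minimal sketch of how such a file is produced (assuming codecarbon is installed; `run_training()` is a hypothetical placeholder for the measured workload):

```python
from codecarbon import EmissionsTracker

# project_name matches the "codecarbon" value in the row above; results are
# appended to emissions.csv in the working directory.
tracker = EmissionsTracker(project_name="codecarbon")
tracker.start()
try:
    run_training()  # placeholder for the tracked work
finally:
    emissions = tracker.stop()  # returns estimated kg CO2eq and writes the CSV row
    print(f"estimated emissions: {emissions} kg CO2eq")
```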
eval_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+     "epoch": 0.0,
+     "eval_accuracy": 0.6813725490196079,
+     "eval_combined_score": 0.7459340879208827,
+     "eval_f1": 0.8104956268221574,
+     "eval_loss": 0.6643973588943481,
+     "eval_runtime": 1.3324,
+     "eval_samples": 408,
+     "eval_samples_per_second": 306.21,
+     "eval_steps_per_second": 38.276
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:014324fc4acbc63977f6f32eab74752231a997514ab5122c50b13d62c4becf2c
+ size 267860081
runs/Sep22_14-48-48_Beaver/1632336539.9057102/events.out.tfevents.1632336539.Beaver.1803589.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5658a63013549ed5b7ea37a14bccfd55cd01c67b9d19de97108ab55470564db9
+ size 4170
runs/Sep22_14-48-48_Beaver/events.out.tfevents.1632336539.Beaver.1803589.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b48508dbc11a055495d9474667a53e358c4266de6fbc8e5d2049f41ecab7253c
+ size 3202
runs/Sep22_14-48-48_Beaver/events.out.tfevents.1632336545.Beaver.1803589.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a24a52d0acdc7a005b1ec57c274a841f4f62d6021851a799153bbeac126ccf4
+ size 459
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilbert-base-uncased", "tokenizer_class": "DistilBertTokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 0.0,
+     "train_loss": 0.6649961948394776,
+     "train_runtime": 0.4443,
+     "train_samples": 3668,
+     "train_samples_per_second": 22.507,
+     "train_steps_per_second": 22.507
+ }
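
The throughput figures follow from the runtime: 10 optimizer steps in 0.4443 s, and with `train_batch_size` 1 the samples-per-second figure equals steps-per-second:

```python
# 10 steps in 0.4443 s; batch size 1 makes samples/s equal steps/s.
print(round(10 / 0.4443, 3))  # 22.507
```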
trainer_state.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.0027262813522355507,
+   "global_step": 10,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0,
+       "step": 10,
+       "total_flos": 331168496640.0,
+       "train_loss": 0.6649961948394776,
+       "train_runtime": 0.4443,
+       "train_samples_per_second": 22.507,
+       "train_steps_per_second": 22.507
+     }
+   ],
+   "max_steps": 10,
+   "num_train_epochs": 1,
+   "total_flos": 331168496640.0,
+   "trial_name": null,
+   "trial_params": null
+ }
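
The fractional `epoch` is consistent with the README hyperparameters: 10 steps at batch size 1 over the 3,668 MRPC training examples, which a short stdlib check confirms:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# 10 steps * batch size 1 = 10 of 3668 training samples seen.
epoch = state["global_step"] / 3668
assert abs(epoch - state["epoch"]) < 1e-15
print(epoch)  # 0.0027262813522355507
```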
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82f5afd8071cb5726167ecfa873079d6dd823e50c1ea1a06e51b530298b4d840
+ size 2607
vocab.txt ADDED
The diff for this file is too large to render. See raw diff