Cheng98 committed
Commit dd437f3
1 Parent(s): c2ddb10

Upload folder using huggingface_hub
README.md CHANGED
@@ -1,13 +1,28 @@
 ---
+language:
+- en
 license: other
 base_model: facebook/opt-125m
 tags:
 - generated_from_trainer
+datasets:
+- glue
 metrics:
 - accuracy
 model-index:
 - name: opt-125m-rte
-  results: []
+  results:
+  - task:
+      name: Text Classification
+      type: text-classification
+    dataset:
+      name: GLUE RTE
+      type: glue
+      args: rte
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.6425992779783394
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -15,10 +30,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # opt-125m-rte
 
-This model is a fine-tuned version of [facebook/opt-125m](https://huggingface.co/facebook/opt-125m) on an unknown dataset.
+This model is a fine-tuned version of [facebook/opt-125m](https://huggingface.co/facebook/opt-125m) on the GLUE RTE dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.7647
-- Accuracy: 0.6354
+- Loss: 3.2055
+- Accuracy: 0.6426
 
 ## Model description
 
@@ -37,24 +52,16 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 5e-05
-- train_batch_size: 16
-- eval_batch_size: 32
+- learning_rate: 2e-05
+- train_batch_size: 8
+- eval_batch_size: 8
 - seed: 42
-- gradient_accumulation_steps: 8
-- total_train_batch_size: 128
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 4
+- num_epochs: 5.0
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss | Accuracy |
-|:-------------:|:-----:|:----:|:---------------:|:--------:|
-| No log | 0.97 | 19 | 0.6622 | 0.6462 |
-| No log | 2.0 | 39 | 0.6257 | 0.6390 |
-| No log | 2.97 | 58 | 0.7573 | 0.6354 |
-| No log | 3.9 | 76 | 0.7647 | 0.6354 |
 
 
 ### Framework versions
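For context on what the updated card describes, here is a minimal usage sketch for the fine-tuned RTE classifier. The repo id `Cheng98/opt-125m-rte` is an assumption inferred from the committer and model name and is not stated in this commit; replace it with the actual repo id or a local checkpoint path.

```python
# Hypothetical usage sketch for the fine-tuned RTE classifier described in the card.
# The repo id below is an assumption inferred from the committer and model name.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "Cheng98/opt-125m-rte"  # assumption; replace with the actual repo id or a local path
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

premise = "A man is playing a guitar on stage."
hypothesis = "A man is performing music."

# RTE examples are sentence pairs; the tokenizer encodes them as two segments.
inputs = tokenizer(premise, hypothesis, truncation=True, max_length=128, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted = logits.argmax(dim=-1).item()
print(model.config.id2label[predicted])  # "entailment" or "not_entailment"
```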
all_results.json CHANGED
@@ -1,14 +1,14 @@
 {
-    "epoch": 3.9,
-    "eval_accuracy": 0.6353790613718412,
-    "eval_loss": 0.764682948589325,
-    "eval_runtime": 0.9007,
+    "epoch": 5.0,
+    "eval_accuracy": 0.6425992779783394,
+    "eval_loss": 3.205474376678467,
+    "eval_runtime": 0.5373,
     "eval_samples": 277,
-    "eval_samples_per_second": 307.546,
-    "eval_steps_per_second": 9.992,
-    "train_loss": 0.5004565590306332,
-    "train_runtime": 76.0596,
+    "eval_samples_per_second": 515.583,
+    "eval_steps_per_second": 65.146,
+    "train_loss": 0.3344010169689472,
+    "train_runtime": 98.9629,
     "train_samples": 2490,
-    "train_samples_per_second": 130.95,
-    "train_steps_per_second": 0.999
+    "train_samples_per_second": 125.805,
+    "train_steps_per_second": 15.763
 }
config.json CHANGED
@@ -13,16 +13,16 @@
   "enable_bias": true,
   "eos_token_id": 2,
   "ffn_dim": 3072,
-  "finetuning_task": "text-classification",
+  "finetuning_task": "rte",
   "hidden_size": 768,
   "id2label": {
-    "0": "0",
-    "1": "1"
+    "0": "entailment",
+    "1": "not_entailment"
   },
   "init_std": 0.02,
   "label2id": {
-    "0": 0,
-    "1": 1
+    "entailment": 0,
+    "not_entailment": 1
   },
   "layer_norm_elementwise_affine": true,
   "layerdrop": 0.0,
eval_results.json CHANGED
@@ -1,9 +1,9 @@
 {
-    "epoch": 3.9,
-    "eval_accuracy": 0.6353790613718412,
-    "eval_loss": 0.764682948589325,
-    "eval_runtime": 0.9007,
+    "epoch": 5.0,
+    "eval_accuracy": 0.6425992779783394,
+    "eval_loss": 3.205474376678467,
+    "eval_runtime": 0.5373,
     "eval_samples": 277,
-    "eval_samples_per_second": 307.546,
-    "eval_steps_per_second": 9.992
+    "eval_samples_per_second": 515.583,
+    "eval_steps_per_second": 65.146
 }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0b82d4ef6934c862a4c85f3972c435695856da58933d1eab159f2e7ed760907a
+oid sha256:8cb641be82ca0f1ccfb4f61fe42542d9659f201169ff773b587f2a936c8dfb1a
 size 501029729
tokenizer.json CHANGED
@@ -2,13 +2,13 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length": 512,
+    "max_length": 128,
     "strategy": "LongestFirst",
     "stride": 0
   },
   "padding": {
     "strategy": {
-      "Fixed": 512
+      "Fixed": 128
     },
     "direction": "Right",
     "pad_to_multiple_of": null,
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 3.9,
-    "train_loss": 0.5004565590306332,
-    "train_runtime": 76.0596,
+    "epoch": 5.0,
+    "train_loss": 0.3344010169689472,
+    "train_runtime": 98.9629,
     "train_samples": 2490,
-    "train_samples_per_second": 130.95,
-    "train_steps_per_second": 0.999
+    "train_samples_per_second": 125.805,
+    "train_steps_per_second": 15.763
 }
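The new throughput numbers are consistent with the card's hyperparameters (2,490 training examples, batch size 8, 5 epochs). A quick arithmetic check, assuming a single device and no gradient accumulation:

```python
# Sanity check: train_results.json throughput vs. the hyperparameters in the card.
# Assumes a single device, per-device batch size 8, and no gradient accumulation.
import math

train_samples = 2490
batch_size = 8
epochs = 5
runtime_s = 98.9629

steps_per_epoch = math.ceil(train_samples / batch_size)  # 312
total_steps = steps_per_epoch * epochs                   # 1560, matches trainer_state.json
print(total_steps)
print(total_steps / runtime_s)              # ~15.76, matches train_steps_per_second
print(train_samples * epochs / runtime_s)   # ~125.8, matches train_samples_per_second
```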
trainer_state.json CHANGED
@@ -1,61 +1,43 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 3.8974358974358974,
-  "global_step": 76,
+  "epoch": 5.0,
+  "global_step": 1560,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.97,
-      "eval_accuracy": 0.6462093862815884,
-      "eval_loss": 0.6622310280799866,
-      "eval_runtime": 0.8176,
-      "eval_samples_per_second": 338.782,
-      "eval_steps_per_second": 11.007,
-      "step": 19
+      "epoch": 1.6,
+      "learning_rate": 1.3589743589743592e-05,
+      "loss": 0.6095,
+      "step": 500
     },
     {
-      "epoch": 2.0,
-      "eval_accuracy": 0.6389891696750902,
-      "eval_loss": 0.6256995797157288,
-      "eval_runtime": 0.8184,
-      "eval_samples_per_second": 338.47,
-      "eval_steps_per_second": 10.997,
-      "step": 39
+      "epoch": 3.21,
+      "learning_rate": 7.17948717948718e-06,
+      "loss": 0.3272,
+      "step": 1000
     },
     {
-      "epoch": 2.97,
-      "eval_accuracy": 0.6353790613718412,
-      "eval_loss": 0.7572575807571411,
-      "eval_runtime": 0.8328,
-      "eval_samples_per_second": 332.625,
-      "eval_steps_per_second": 10.807,
-      "step": 58
+      "epoch": 4.81,
+      "learning_rate": 7.692307692307694e-07,
+      "loss": 0.0972,
+      "step": 1500
     },
     {
-      "epoch": 3.9,
-      "eval_accuracy": 0.6353790613718412,
-      "eval_loss": 0.764682948589325,
-      "eval_runtime": 0.817,
-      "eval_samples_per_second": 339.032,
-      "eval_steps_per_second": 11.015,
-      "step": 76
-    },
-    {
-      "epoch": 3.9,
-      "step": 76,
-      "total_flos": 2537191448248320.0,
-      "train_loss": 0.5004565590306332,
-      "train_runtime": 76.0596,
-      "train_samples_per_second": 130.95,
-      "train_steps_per_second": 0.999
+      "epoch": 5.0,
+      "step": 1560,
+      "total_flos": 813286136217600.0,
+      "train_loss": 0.3344010169689472,
+      "train_runtime": 98.9629,
+      "train_samples_per_second": 125.805,
+      "train_steps_per_second": 15.763
     }
   ],
-  "max_steps": 76,
-  "num_train_epochs": 4,
-  "total_flos": 2537191448248320.0,
+  "max_steps": 1560,
+  "num_train_epochs": 5,
+  "total_flos": 813286136217600.0,
   "trial_name": null,
   "trial_params": null
 }
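The logged learning rates match a linear schedule decaying from the card's 2e-05 to zero over 1,560 steps, assuming no warmup, which can be checked directly:

```python
# Check: the learning rates in log_history follow a linear decay from 2e-05
# to 0 over 1560 steps, assuming no warmup steps.
base_lr = 2e-05
max_steps = 1560

def linear_lr(step: int) -> float:
    return base_lr * (max_steps - step) / max_steps

for step in (500, 1000, 1500):
    print(step, linear_lr(step))
# step 500  -> ~1.3590e-05 (logged 1.3589743589743592e-05)
# step 1000 -> ~7.1795e-06 (logged 7.17948717948718e-06)
# step 1500 -> ~7.6923e-07 (logged 7.692307692307694e-07)
```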
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:640c9076d2caaa9604b9af40e9338f27bfb15056397526f47595f15d4d0dd940
-size 4091
+oid sha256:2b7acb9d14b816ffeacb7e2beaba2d6037a7bff441ba00c7df1b155086660724
+size 3963