albertmartinez committed
Commit: bfa7f29
Parent: b6d935c

End of training

Files changed (5):
  1. README.md +5 -0
  2. all_results.json +8 -8
  3. eval_results.json +5 -5
  4. train_results.json +3 -3
  5. trainer_state.json +13 -13
README.md CHANGED
@@ -3,6 +3,8 @@ license: apache-2.0
 base_model: google-bert/bert-base-uncased
 tags:
 - generated_from_trainer
+metrics:
+- f1
 model-index:
 - name: bert-sdg-classification
   results: []
@@ -14,6 +16,9 @@ should probably proofread and complete it, then remove this comment. -->
 # bert-sdg-classification
 
 This model is a fine-tuned version of [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.7344
+- F1: 0.7900
 
 ## Model description
 
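The updated card reports an evaluation loss of 0.7344 and an F1 of 0.7900. As a rough usage sketch, the classifier could be loaded with the transformers pipeline; note that the repository id "albertmartinez/bert-sdg-classification" below is an assumption inferred from the commit author and model name, and the label set is not visible in this diff.

# Hedged sketch: load the fine-tuned classifier for inference.
# The repo id is assumed, not confirmed by this commit; adjust to the actual repository.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="albertmartinez/bert-sdg-classification",
)

print(classifier("Ensure availability and sustainable management of water and sanitation for all."))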
all_results.json CHANGED
@@ -1,15 +1,15 @@
 {
     "epoch": 3.0,
-    "eval_f1": 0.9421819286392854,
-    "eval_loss": 0.3017396032810211,
-    "eval_runtime": 2280.6832,
+    "eval_f1": 0.789972706564468,
+    "eval_loss": 0.7343637943267822,
+    "eval_runtime": 2232.9309,
     "eval_samples": 12908,
-    "eval_samples_per_second": 5.66,
-    "eval_steps_per_second": 0.708,
+    "eval_samples_per_second": 5.781,
+    "eval_steps_per_second": 0.723,
     "total_flos": 2.377533515518771e+16,
-    "train_loss": 0.15210941878937695,
-    "train_runtime": 47350.7748,
+    "train_loss": 1.0759811563350086,
+    "train_runtime": 46811.0436,
     "train_samples": 30117,
-    "train_samples_per_second": 1.908,
+    "train_samples_per_second": 1.93,
     "train_steps_per_second": 0.06
 }
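The throughput fields in all_results.json are consistent with simple ratios of the other entries: 12908 eval samples / 2232.93 s ≈ 5.781 samples/s, and 30117 train samples × 3 epochs / 46811.04 s ≈ 1.93 samples/s. A small sanity-check sketch, assuming these fields are derived exactly this way (the usual Trainer convention):

# Sanity-check the reported throughput numbers from all_results.json.
# Assumes samples_per_second = processed samples / runtime, as the HF Trainer typically reports.
eval_samples, eval_runtime = 12908, 2232.9309
train_samples, epochs, train_runtime = 30117, 3, 46811.0436
train_steps = 2826

print(round(eval_samples / eval_runtime, 3))             # ~5.781, matches eval_samples_per_second
print(round(train_samples * epochs / train_runtime, 2))  # ~1.93, matches train_samples_per_second
print(round(train_steps / train_runtime, 2))             # ~0.06, matches train_steps_per_second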
eval_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 3.0,
-    "eval_f1": 0.9421819286392854,
-    "eval_loss": 0.3017396032810211,
-    "eval_runtime": 2280.6832,
+    "eval_f1": 0.789972706564468,
+    "eval_loss": 0.7343637943267822,
+    "eval_runtime": 2232.9309,
     "eval_samples": 12908,
-    "eval_samples_per_second": 5.66,
-    "eval_steps_per_second": 0.708
+    "eval_samples_per_second": 5.781,
+    "eval_steps_per_second": 0.723
 }
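The eval_f1 value would have been produced by a compute_metrics callback passed to the Trainer. The averaging scheme is not visible in this commit, so the sketch below assumes a weighted F1 over the SDG classes; treat the function and the averaging choice as illustrative only.

# Illustrative compute_metrics for the Trainer; the weighted average is an assumption,
# since the averaging actually used for eval_f1 is not shown in this commit.
import numpy as np
from sklearn.metrics import f1_score

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return {"f1": f1_score(labels, predictions, average="weighted")}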
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 3.0,
     "total_flos": 2.377533515518771e+16,
-    "train_loss": 0.15210941878937695,
-    "train_runtime": 47350.7748,
+    "train_loss": 1.0759811563350086,
+    "train_runtime": 46811.0436,
     "train_samples": 30117,
-    "train_samples_per_second": 1.908,
+    "train_samples_per_second": 1.93,
     "train_steps_per_second": 0.06
 }
trainer_state.json CHANGED
@@ -10,46 +10,46 @@
   "log_history": [
     {
       "epoch": 0.5307855626326964,
-      "grad_norm": 3.384657382965088,
+      "grad_norm": 5.489389896392822,
       "learning_rate": 8.333333333333334e-06,
-      "loss": 0.2368,
+      "loss": 2.354,
       "step": 500
     },
     {
       "epoch": 1.0615711252653928,
-      "grad_norm": 10.156657218933105,
+      "grad_norm": 5.129901885986328,
       "learning_rate": 8.203054806828391e-06,
-      "loss": 0.1146,
+      "loss": 1.0543,
       "step": 1000
     },
     {
       "epoch": 1.5923566878980893,
-      "grad_norm": 0.6068603992462158,
+      "grad_norm": 9.780948638916016,
       "learning_rate": 5.9568733153638815e-06,
-      "loss": 0.1799,
+      "loss": 0.8205,
       "step": 1500
     },
     {
       "epoch": 2.1231422505307855,
-      "grad_norm": 6.402149677276611,
+      "grad_norm": 8.44005012512207,
       "learning_rate": 3.710691823899371e-06,
-      "loss": 0.1593,
+      "loss": 0.7534,
       "step": 2000
     },
     {
       "epoch": 2.653927813163482,
-      "grad_norm": 3.010026693344116,
+      "grad_norm": 4.482905387878418,
       "learning_rate": 1.464510332434861e-06,
-      "loss": 0.1031,
+      "loss": 0.6727,
       "step": 2500
     },
     {
       "epoch": 3.0,
       "step": 2826,
       "total_flos": 2.377533515518771e+16,
-      "train_loss": 0.15210941878937695,
-      "train_runtime": 47350.7748,
-      "train_samples_per_second": 1.908,
+      "train_loss": 1.0759811563350086,
+      "train_runtime": 46811.0436,
+      "train_samples_per_second": 1.93,
       "train_steps_per_second": 0.06
     }
   ],
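Since trainer_state.json keeps the full log_history, the logged losses and learning rates can be read back directly from the saved state; a minimal sketch, assuming the file sits in the output directory of this training run:

# Minimal sketch: read the logged training curve from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # per-step training logs; the final summary entry has train_loss instead
        print(f'step {entry["step"]:>5}: loss={entry["loss"]:.4f}, lr={entry["learning_rate"]:.2e}')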