superbigtree committed
Commit
7efda69
1 Parent(s): 57f8d44

End of training

README.md CHANGED
@@ -2,7 +2,7 @@
 tags:
 - generated_from_trainer
 datasets:
-- coco_dataset_script
+- ydshieh/coco_dataset_script
 model-index:
 - name: clip-roberta-finetuned
   results: []
@@ -13,7 +13,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # clip-roberta-finetuned
 
-This model was trained from scratch on the coco_dataset_script dataset.
+This model was trained from scratch on the ydshieh/coco_dataset_script 2017 dataset.
+It achieves the following results on the evaluation set:
+- Loss: 1.5655
 
 ## Model description
 
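For context, "clip-roberta" checkpoints of this kind are usually produced by the transformers contrastive-image-text example (run_clip.py), which pairs a CLIP vision encoder with a RoBERTa text encoder in a VisionTextDualEncoderModel. A minimal loading sketch under that assumption; the local path is hypothetical:

# Sketch only: assumes the checkpoint follows the transformers
# contrastive-image-text recipe (VisionTextDualEncoderModel); the path below
# is a hypothetical local clone of this repository.
from transformers import AutoTokenizer, VisionTextDualEncoderModel

model_dir = "./clip-roberta-finetuned"
model = VisionTextDualEncoderModel.from_pretrained(model_dir)
tokenizer = AutoTokenizer.from_pretrained(model_dir)

# Text features as a quick smoke test; image inputs would additionally
# need the image processor saved alongside the checkpoint.
inputs = tokenizer(["a photo of a dog"], return_tensors="pt", padding=True)
print(model.get_text_features(**inputs).shape)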
 
all_results.json CHANGED
@@ -1,11 +1,11 @@
 {
   "epoch": 3.0,
-  "eval_loss": 1.5824697017669678,
-  "eval_runtime": 43.6193,
-  "eval_samples_per_second": 573.462,
-  "eval_steps_per_second": 2.247,
-  "train_loss": 0.3156686747637862,
-  "train_runtime": 5245.2618,
-  "train_samples_per_second": 338.45,
-  "train_steps_per_second": 1.322
+  "eval_loss": 1.5654611587524414,
+  "eval_runtime": 22.7529,
+  "eval_samples_per_second": 1099.377,
+  "eval_steps_per_second": 2.154,
+  "train_loss": 0.31381386621600615,
+  "train_runtime": 2692.8552,
+  "train_samples_per_second": 659.248,
+  "train_steps_per_second": 1.288
 }
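The headline numbers above can be cross-checked with a few lines of Python; a small sketch, assuming all_results.json is read from the repository root:

import json

with open("all_results.json") as f:
    results = json.load(f)

# Implied evaluation-set size: throughput multiplied by runtime.
print(round(results["eval_runtime"] * results["eval_samples_per_second"]))  # ~25014
# Evaluation loss rounded as quoted in the README.
print(round(results["eval_loss"], 4))  # 1.5655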
eval_results.json CHANGED
@@ -1,7 +1,7 @@
 {
   "epoch": 3.0,
-  "eval_loss": 1.5824697017669678,
-  "eval_runtime": 43.6193,
-  "eval_samples_per_second": 573.462,
-  "eval_steps_per_second": 2.247
+  "eval_loss": 1.5654611587524414,
+  "eval_runtime": 22.7529,
+  "eval_samples_per_second": 1099.377,
+  "eval_steps_per_second": 2.154
 }
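Relative to the previous run, the evaluation loss in this commit is about 1% lower; a one-line check using the two values in the diff above:

old_loss, new_loss = 1.5824697017669678, 1.5654611587524414
print(f"{(old_loss - new_loss) / old_loss:.2%}")  # ~1.07%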
runs/Feb12_18-25-46_28fc6ffbaa77/events.out.tfevents.1707765211.28fc6ffbaa77.4275.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47e3771d04fd0a366483fcde57ee02e6f32df88f6fd84078ca4acf3abcd48b9f
+size 359
train_results.json CHANGED
@@ -1,7 +1,7 @@
 {
   "epoch": 3.0,
-  "train_loss": 0.3156686747637862,
-  "train_runtime": 5245.2618,
-  "train_samples_per_second": 338.45,
-  "train_steps_per_second": 1.322
+  "train_loss": 0.31381386621600615,
+  "train_runtime": 2692.8552,
+  "train_samples_per_second": 659.248,
+  "train_steps_per_second": 1.288
 }
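Both runs cover the same total number of training samples over 3 epochs, so the halved step count (6936 to 3468, see trainer_state.json below) together with the roughly doubled throughput suggests the global batch size was approximately doubled, from about 256 to about 512 samples per step. That reading is an inference from the logged numbers, not something stated in the commit; the arithmetic:

# Derive total samples and samples per optimizer step for each run
# from the values shown in the diffs (inference only).
old = {"steps": 6936, "runtime": 5245.2618, "samples_per_second": 338.45}
new = {"steps": 3468, "runtime": 2692.8552, "samples_per_second": 659.248}

for run in (old, new):
    total_samples = run["runtime"] * run["samples_per_second"]  # ~1.78M in both cases
    print(round(total_samples), round(total_samples / run["steps"]))  # ~256 vs ~512 per step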
trainer_state.json CHANGED
@@ -3,101 +3,59 @@
   "best_model_checkpoint": null,
   "epoch": 3.0,
   "eval_steps": 500,
-  "global_step": 6936,
+  "global_step": 3468,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
-    {
-      "epoch": 0.22,
-      "learning_rate": 4.639561707035756e-05,
-      "loss": 0.9035,
-      "step": 500
-    },
     {
       "epoch": 0.43,
       "learning_rate": 4.2791234140715114e-05,
-      "loss": 0.5166,
-      "step": 1000
-    },
-    {
-      "epoch": 0.65,
-      "learning_rate": 3.9186851211072664e-05,
-      "loss": 0.4342,
-      "step": 1500
+      "loss": 0.725,
+      "step": 500
     },
     {
       "epoch": 0.87,
       "learning_rate": 3.558246828143022e-05,
-      "loss": 0.3862,
-      "step": 2000
-    },
-    {
-      "epoch": 1.08,
-      "learning_rate": 3.1978085351787776e-05,
-      "loss": 0.3272,
-      "step": 2500
+      "loss": 0.3879,
+      "step": 1000
     },
     {
       "epoch": 1.3,
       "learning_rate": 2.8373702422145332e-05,
-      "loss": 0.2757,
-      "step": 3000
-    },
-    {
-      "epoch": 1.51,
-      "learning_rate": 2.4769319492502884e-05,
-      "loss": 0.2623,
-      "step": 3500
+      "loss": 0.2897,
+      "step": 1500
     },
     {
       "epoch": 1.73,
       "learning_rate": 2.116493656286044e-05,
-      "loss": 0.2416,
-      "step": 4000
-    },
-    {
-      "epoch": 1.95,
-      "learning_rate": 1.7560553633217993e-05,
-      "loss": 0.2301,
-      "step": 4500
+      "loss": 0.2463,
+      "step": 2000
     },
     {
       "epoch": 2.16,
       "learning_rate": 1.395617070357555e-05,
-      "loss": 0.1858,
-      "step": 5000
-    },
-    {
-      "epoch": 2.38,
-      "learning_rate": 1.0351787773933102e-05,
-      "loss": 0.1698,
-      "step": 5500
+      "loss": 0.2074,
+      "step": 2500
     },
     {
       "epoch": 2.6,
       "learning_rate": 6.747404844290659e-06,
-      "loss": 0.1611,
-      "step": 6000
-    },
-    {
-      "epoch": 2.81,
-      "learning_rate": 3.143021914648212e-06,
-      "loss": 0.1545,
-      "step": 6500
+      "loss": 0.1688,
+      "step": 3000
     },
     {
       "epoch": 3.0,
-      "step": 6936,
+      "step": 3468,
       "total_flos": 2.370754172808069e+17,
-      "train_loss": 0.3156686747637862,
-      "train_runtime": 5245.2618,
-      "train_samples_per_second": 338.45,
-      "train_steps_per_second": 1.322
+      "train_loss": 0.31381386621600615,
+      "train_runtime": 2692.8552,
+      "train_samples_per_second": 659.248,
+      "train_steps_per_second": 1.288
     }
   ],
   "logging_steps": 500,
-  "max_steps": 6936,
+  "max_steps": 3468,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 500,