vinluvie committed
Commit 5cb0045
1 Parent(s): fd02824

End of training

README.md CHANGED
@@ -15,6 +15,8 @@ should probably proofread and complete it, then remove this comment. -->
 # clip-vit-large-patch14-finetuned-sofas
 
 This model is a fine-tuned version of [openai/clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) on the imagefolder dataset.
+It achieves the following results on the evaluation set:
+- Loss: 2.1527
 
 ## Model description
 
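As a usage note for the updated card: the sketch below shows how a checkpoint like this is typically loaded with the Transformers library. The repo id `vinluvie/clip-vit-large-patch14-finetuned-sofas` is inferred from the committer and model name, and the image path and text prompts are placeholders, so treat all three as assumptions.

```python
# Minimal sketch: load the fine-tuned CLIP checkpoint and score image/text pairs.
# The repo id, image path, and prompts below are assumptions, not taken from this commit.
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

repo_id = "vinluvie/clip-vit-large-patch14-finetuned-sofas"  # assumed repo id
model = CLIPModel.from_pretrained(repo_id)
processor = CLIPProcessor.from_pretrained(repo_id)

image = Image.open("sofa.jpg")  # any local image
texts = ["a photo of a leather sofa", "a photo of a fabric sofa"]

inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
outputs = model(**inputs)

# logits_per_image holds the similarity of the image to each text prompt
probs = outputs.logits_per_image.softmax(dim=1)
print(probs)
```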
all_results.json CHANGED
@@ -1,15 +1,15 @@
 {
     "epoch": 10.0,
-    "eval_loss": 3.532412052154541,
-    "eval_runtime": 10.0151,
+    "eval_loss": 2.1526899337768555,
+    "eval_runtime": 11.0798,
     "eval_samples_per_second": 4.693,
-    "eval_steps_per_second": 0.2,
-    "test_loss": 5.01876974105835,
-    "test_runtime": 20.7585,
-    "test_samples_per_second": 4.673,
-    "test_steps_per_second": 0.193,
-    "train_loss": 1.9417870839436848,
-    "train_runtime": 3223.0323,
-    "train_samples_per_second": 1.176,
-    "train_steps_per_second": 0.037
+    "eval_steps_per_second": 0.181,
+    "test_loss": 2.584235906600952,
+    "test_runtime": 22.1131,
+    "test_samples_per_second": 4.703,
+    "test_steps_per_second": 0.181,
+    "train_loss": 1.5176889692034041,
+    "train_runtime": 3535.8421,
+    "train_samples_per_second": 1.185,
+    "train_steps_per_second": 0.04
 }
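To pull these numbers out programmatically rather than reading the diff, a minimal sketch (assuming `all_results.json` sits in the current working directory):

```python
# Sketch: read the Trainer output file and print the headline metrics.
import json

with open("all_results.json") as f:
    results = json.load(f)

for key in ("eval_loss", "test_loss", "train_loss"):
    print(f"{key}: {results[key]:.4f}")
```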
eval_results.json CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 10.0,
-    "eval_loss": 3.532412052154541,
-    "eval_runtime": 10.0151,
+    "eval_loss": 2.1526899337768555,
+    "eval_runtime": 11.0798,
     "eval_samples_per_second": 4.693,
-    "eval_steps_per_second": 0.2
+    "eval_steps_per_second": 0.181
 }
test_results.json CHANGED
@@ -1,6 +1,6 @@
 {
-    "test_loss": 5.01876974105835,
-    "test_runtime": 20.7585,
-    "test_samples_per_second": 4.673,
-    "test_steps_per_second": 0.193
+    "test_loss": 2.584235906600952,
+    "test_runtime": 22.1131,
+    "test_samples_per_second": 4.703,
+    "test_steps_per_second": 0.181
 }
train_results.json CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 10.0,
-    "train_loss": 1.9417870839436848,
-    "train_runtime": 3223.0323,
-    "train_samples_per_second": 1.176,
-    "train_steps_per_second": 0.037
+    "train_loss": 1.5176889692034041,
+    "train_runtime": 3535.8421,
+    "train_samples_per_second": 1.185,
+    "train_steps_per_second": 0.04
 }
trainer_state.json CHANGED
@@ -3,27 +3,27 @@
   "best_model_checkpoint": null,
   "epoch": 10.0,
   "eval_steps": 500,
-  "global_step": 120,
+  "global_step": 140,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 10.0,
-      "step": 120,
-      "total_flos": 681742075637700.0,
-      "train_loss": 1.9417870839436848,
-      "train_runtime": 3223.0323,
-      "train_samples_per_second": 1.176,
-      "train_steps_per_second": 0.037
+      "step": 140,
+      "total_flos": 753693745889700.0,
+      "train_loss": 1.5176889692034041,
+      "train_runtime": 3535.8421,
+      "train_samples_per_second": 1.185,
+      "train_steps_per_second": 0.04
     }
   ],
   "logging_steps": 500,
-  "max_steps": 120,
+  "max_steps": 140,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 10,
   "save_steps": 500,
-  "total_flos": 681742075637700.0,
+  "total_flos": 753693745889700.0,
   "train_batch_size": 32,
   "trial_name": null,
   "trial_params": null