Commit 926742e by amyeroberts (HF staff)
Parent: 38543f3

End of training
README.md CHANGED
@@ -2,6 +2,8 @@
 license: apache-2.0
 base_model: google/vit-base-patch16-224-in21k
 tags:
+- image-classification
+- vision
 - generated_from_trainer
 metrics:
 - accuracy
@@ -16,10 +18,10 @@ should probably proofread and complete it, then remove this comment. -->
 [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/aeroberts4444/huggingface/runs/120mmtvn)
 # vit-base-beans-2
 
-This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset.
+This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
 It achieves the following results on the evaluation set:
-- Accuracy: 0.0
-- Loss: 1.1626
+- Loss: 1.1599
+- Accuracy: 0.125
 
 ## Model description
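Since the model card now tags the checkpoint with image-classification and vision, a minimal usage sketch may help. This assumes the checkpoint is published as amyeroberts/vit-base-beans-2 (inferred from the committer and model name, not stated in this commit), and the image path is hypothetical.

```python
# Minimal sketch: run the fine-tuned checkpoint on one image.
# Assumption: the repo id "amyeroberts/vit-base-beans-2" is inferred, not confirmed;
# "leaf.jpg" is a hypothetical local image file.
from transformers import pipeline

classifier = pipeline("image-classification", model="amyeroberts/vit-base-beans-2")
predictions = classifier("leaf.jpg")  # list of {"label": ..., "score": ...} dicts
print(predictions)
```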
all_results.json CHANGED
@@ -2,12 +2,12 @@
  "epoch": 2.0,
  "eval_accuracy": 0.125,
  "eval_loss": 1.1599400043487549,
- "eval_runtime": 0.178,
- "eval_samples_per_second": 44.956,
- "eval_steps_per_second": 5.619,
+ "eval_runtime": 0.3023,
+ "eval_samples_per_second": 26.461,
+ "eval_steps_per_second": 3.308,
  "total_flos": 1239882946412544.0,
- "train_loss": 0.96820068359375,
- "train_runtime": 21.5994,
- "train_samples_per_second": 0.741,
- "train_steps_per_second": 0.093
+ "train_loss": 0.0,
+ "train_runtime": 10.194,
+ "train_samples_per_second": 1.57,
+ "train_steps_per_second": 0.196
 }
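The updated runtime and throughput figures are mutually consistent: samples_per_second times runtime recovers the number of examples processed, and steps_per_second times runtime recovers the step count. The sketch below checks those relations; the inferred counts (roughly 8 evaluation samples and 2 optimizer steps over 16 training samples) are deductions from these numbers, not values stored in the file.

```python
# Sanity-check sketch: throughput x runtime should recover counts.
# Values are copied from the updated all_results.json; the inferred sample and
# step counts are deductions, not fields in the file.
eval_runtime, eval_sps, eval_steps_ps = 0.3023, 26.461, 3.308
train_runtime, train_sps, train_steps_ps = 10.194, 1.57, 0.196

print(round(eval_sps * eval_runtime))         # ~8 evaluation samples
print(round(eval_steps_ps * eval_runtime))    # ~1 evaluation step
print(round(train_sps * train_runtime))       # ~16 samples seen across 2 epochs
print(round(train_steps_ps * train_runtime))  # ~2 optimizer steps, matching trainer_state.json
```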
config.json CHANGED
@@ -28,6 +28,7 @@
  "num_channels": 3,
  "num_hidden_layers": 12,
  "patch_size": 16,
+ "problem_type": "single_label_classification",
  "qkv_bias": true,
  "torch_dtype": "float32",
  "transformers_version": "4.41.0.dev0"
eval_results.json CHANGED
@@ -2,7 +2,7 @@
  "epoch": 2.0,
  "eval_accuracy": 0.125,
  "eval_loss": 1.1599400043487549,
- "eval_runtime": 0.178,
- "eval_samples_per_second": 44.956,
- "eval_steps_per_second": 5.619
+ "eval_runtime": 0.3023,
+ "eval_samples_per_second": 26.461,
+ "eval_steps_per_second": 3.308
 }
runs/May16_18-16-28_amys-mbp-2.taildb5d.ts.net/events.out.tfevents.1715879844.amys-mbp-2.taildb5d.ts.net ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b48f3669cb285edb055449c393e9ea15dddcf62ecebb4ae9144e7bec2059eb72
+size 40
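What lands in the repository here is a 40-byte Git LFS pointer, not the TensorBoard event data itself; after fetching the real object (for example with git lfs pull), the run can be inspected like any other event log. A sketch using TensorBoard's event reader follows; it lists whatever scalar tags are present rather than assuming specific ones.

```python
# Sketch: inspect the TensorBoard event file once the LFS object has been fetched.
# Tag names vary by run, so list whatever scalars exist instead of assuming any.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

run_dir = "runs/May16_18-16-28_amys-mbp-2.taildb5d.ts.net"
acc = EventAccumulator(run_dir)
acc.Reload()
for tag in acc.Tags()["scalars"]:
    values = [(event.step, event.value) for event in acc.Scalars(tag)]
    print(tag, values)
```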
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
  "epoch": 2.0,
  "total_flos": 1239882946412544.0,
- "train_loss": 0.96820068359375,
- "train_runtime": 21.5994,
- "train_samples_per_second": 0.741,
- "train_steps_per_second": 0.093
+ "train_loss": 0.0,
+ "train_runtime": 10.194,
+ "train_samples_per_second": 1.57,
+ "train_steps_per_second": 0.196
 }
trainer_state.json CHANGED
@@ -30,10 +30,10 @@
    "epoch": 2.0,
    "step": 2,
    "total_flos": 1239882946412544.0,
-   "train_loss": 0.96820068359375,
-   "train_runtime": 21.5994,
-   "train_samples_per_second": 0.741,
-   "train_steps_per_second": 0.093
+   "train_loss": 0.0,
+   "train_runtime": 10.194,
+   "train_samples_per_second": 1.57,
+   "train_steps_per_second": 0.196
   }
  ],
  "logging_steps": 10,