Felix Marty committed · Commit df4084d · 1 Parent(s): fa16ec1

channels
- all_results.json +9 -9
- config.json +1 -1
- eval_results.json +5 -5
- preprocessor_config.json +4 -0
- pytorch_model.bin +2 -2
- train.py +10 -5
- train_results.json +4 -4
- trainer_state.json +6 -6
- training_args.bin +1 -1
all_results.json
CHANGED
@@ -1,12 +1,12 @@
 {
     "epoch": 6.0,
-    "eval_accuracy": 0.
-    "eval_loss": 0.
-    "eval_runtime": 0.
-    "eval_samples_per_second":
-    "eval_steps_per_second":
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples_per_second":
-    "train_steps_per_second":
+    "eval_accuracy": 0.6390977443609023,
+    "eval_loss": 0.7639745473861694,
+    "eval_runtime": 0.7192,
+    "eval_samples_per_second": 184.925,
+    "eval_steps_per_second": 23.637,
+    "train_loss": 0.8484223491013653,
+    "train_runtime": 32.6308,
+    "train_samples_per_second": 190.127,
+    "train_steps_per_second": 6.068
 }
config.json
CHANGED
@@ -26,7 +26,7 @@
   },
   "layer_type": "basic",
   "model_type": "resnet",
-  "num_channels":
+  "num_channels": 3,
   "problem_type": "single_label_classification",
   "torch_dtype": "float32",
   "transformers_version": "4.21.0.dev0"
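For context, a minimal sketch (not part of this commit) of how the updated config could be inspected after the change; "path/to/this/repo" is a placeholder for a local clone of this repository.

from transformers import ResNetConfig

config = ResNetConfig.from_pretrained("path/to/this/repo")  # placeholder path, not from the commit
print(config.num_channels)  # expected to be 3 after this commit
print(config.layer_type)    # "basic"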
eval_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 6.0,
-    "eval_accuracy": 0.
-    "eval_loss": 0.
-    "eval_runtime": 0.
-    "eval_samples_per_second":
-    "eval_steps_per_second":
+    "eval_accuracy": 0.6390977443609023,
+    "eval_loss": 0.7639745473861694,
+    "eval_runtime": 0.7192,
+    "eval_samples_per_second": 184.925,
+    "eval_steps_per_second": 23.637
 }
preprocessor_config.json
CHANGED
@@ -4,9 +4,13 @@
   "do_resize": true,
   "feature_extractor_type": "ConvNextFeatureExtractor",
   "image_mean": [
+    0.45,
+    0.45,
     0.45
   ],
   "image_std": [
+    0.22,
+    0.22,
     0.22
   ],
   "resample": 3,
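Likewise, a small sketch (not part of this commit) for checking the per-channel normalization statistics written by this change; "path/to/this/repo" is again a placeholder.

from transformers import ConvNextFeatureExtractor

feature_extractor = ConvNextFeatureExtractor.from_pretrained("path/to/this/repo")  # placeholder path
print(feature_extractor.image_mean)  # expected: [0.45, 0.45, 0.45]
print(feature_extractor.image_std)   # expected: [0.22, 0.22, 0.22]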
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:64d031cd790aedfb724b3db0bccf4be88ab3b13655bab905ec329dfa39a8c0d6
+size 786777
train.py
CHANGED
@@ -7,7 +7,7 @@ import datasets
 import torch
 import transformers
 from torchinfo import summary
-from torchvision.transforms import Compose, Normalize, ToTensor
+from torchvision.transforms import Compose, Normalize, ToTensor, Resize, CenterCrop
 from transformers import (
     ConvNextFeatureExtractor,
     HfArgumentParser,
@@ -103,7 +103,7 @@ def main():
     dataset["validation"] = split["test"]

     feature_extractor = ConvNextFeatureExtractor(
-        do_resize=True, do_normalize=True, image_mean=[0.45], image_std=[0.22]
+        do_resize=True, do_normalize=True, image_mean=[0.45, 0.45, 0.45], image_std=[0.22, 0.22, 0.22]
     )

     # Prepare label mappings.
@@ -115,7 +115,7 @@ def main():
         id2label[str(i)] = label

     config = ResNetConfig(
-        num_channels=
+        num_channels=3,
         layer_type="basic",
         depths=[2, 2],
         hidden_sizes=[32, 64],
@@ -129,12 +129,17 @@ def main():

     # Define torchvision transforms to be applied to each image.
     normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
-    _transforms = Compose([
+    _transforms = Compose([
+        Resize(feature_extractor.size),
+        CenterCrop(feature_extractor.size),
+        ToTensor(),
+        normalize]
+    )

     def transforms(example_batch):
         """Apply _train_transforms across a batch."""
         # black and white
-        example_batch["pixel_values"] = [_transforms(pil_img.convert("
+        example_batch["pixel_values"] = [_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
         return example_batch

     # Load the accuracy metric from the datasets package
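The new transform pipeline converts each (possibly grayscale) image to RGB before normalizing with three-channel statistics. A standalone sketch of that behaviour, assuming a 224-pixel target size and a synthetic grayscale test image (both assumptions for illustration, not taken from train.py):

from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

size = 224  # assumption: stand-in for feature_extractor.size
_transforms = Compose([
    Resize(size),
    CenterCrop(size),
    ToTensor(),
    Normalize(mean=[0.45, 0.45, 0.45], std=[0.22, 0.22, 0.22]),
])

pil_img = Image.new("L", (256, 256), color=128)     # synthetic grayscale input
pixel_values = _transforms(pil_img.convert("RGB"))  # convert to 3 channels, as in the diff
print(pixel_values.shape)                           # torch.Size([3, 224, 224])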
train_results.json
CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 6.0,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples_per_second":
-    "train_steps_per_second":
+    "train_loss": 0.8484223491013653,
+    "train_runtime": 32.6308,
+    "train_samples_per_second": 190.127,
+    "train_steps_per_second": 6.068
 }
trainer_state.json
CHANGED
@@ -10,16 +10,16 @@
     {
       "epoch": 6.0,
       "step": 198,
-      "total_flos":
-      "train_loss": 0.
-      "train_runtime":
-      "train_samples_per_second":
-      "train_steps_per_second":
+      "total_flos": 1064007556964352.0,
+      "train_loss": 0.8484223491013653,
+      "train_runtime": 32.6308,
+      "train_samples_per_second": 190.127,
+      "train_steps_per_second": 6.068
     }
   ],
   "max_steps": 198,
   "num_train_epochs": 6,
-  "total_flos":
+  "total_flos": 1064007556964352.0,
   "trial_name": null,
   "trial_params": null
 }
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b5c30f97a34f52c729b4381ca5704c7c50d318011f10167ec3bfaa9db9be0777
 size 3247