Felix Marty committed on
Commit f296fc3
1 Parent(s): 36beed7

added model

README.md CHANGED
@@ -1,3 +1,5 @@
 ---
 license: apache-2.0
 ---
+
+A model trained on the beans dataset, just for testing and having a really tiny model.
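
For a quick smoke test of the checkpoint added in this commit, the model can be loaded and run on a dummy input. A minimal sketch, assuming the files below are available at a placeholder path "path/to/resnet-tiny-beans" (substitute the actual repo id or a local directory); note that the config expects single-channel inputs:

import torch
from transformers import ResNetForImageClassification

# Placeholder path -- replace with the actual Hub repo id or a local checkpoint directory.
model = ResNetForImageClassification.from_pretrained("path/to/resnet-tiny-beans")
model.eval()

# num_channels=1 in config.json, so feed a grayscale (1-channel) batch.
dummy = torch.randn(1, 1, 224, 224)
with torch.no_grad():
    logits = model(pixel_values=dummy).logits
print(logits.argmax(-1))  # predicted id among LABEL_0 / LABEL_1 / LABEL_2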
all_results.json ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 6.0,
+    "eval_accuracy": 0.518796992481203,
+    "eval_loss": 0.9727851152420044,
+    "eval_runtime": 0.6952,
+    "eval_samples_per_second": 191.321,
+    "eval_steps_per_second": 24.455,
+    "train_loss": 0.9793546272046638,
+    "train_runtime": 46.4278,
+    "train_samples_per_second": 133.627,
+    "train_steps_per_second": 4.265
+}
config.json ADDED
@@ -0,0 +1,32 @@
+{
+  "architectures": [
+    "ResNetForImageClassification"
+  ],
+  "depths": [
+    2,
+    2
+  ],
+  "downsample_in_first_stage": false,
+  "embedding_size": 64,
+  "hidden_act": "relu",
+  "hidden_sizes": [
+    32,
+    64
+  ],
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2"
+  },
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2
+  },
+  "layer_type": "basic",
+  "model_type": "resnet",
+  "num_channels": 1,
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.21.0.dev0"
+}
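
The architecture can also be rebuilt directly from this config.json, which is a convenient way to see how small the network is. A minimal sketch, assuming config.json has been downloaded locally; the printed count is approximate:

from transformers import ResNetConfig, ResNetForImageClassification

# Assumes config.json sits in the working directory.
config = ResNetConfig.from_json_file("config.json")
model = ResNetForImageClassification(config)

# Parameter count: on the order of 190k, consistent with the ~760 KB float32 checkpoint below.
print(sum(p.numel() for p in model.parameters()))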
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 6.0,
+    "eval_accuracy": 0.518796992481203,
+    "eval_loss": 0.9727851152420044,
+    "eval_runtime": 0.6952,
+    "eval_samples_per_second": 191.321,
+    "eval_steps_per_second": 24.455
+}
preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
+{
+  "crop_pct": null,
+  "do_normalize": false,
+  "do_resize": false,
+  "feature_extractor_type": "ConvNextFeatureExtractor",
+  "image_mean": [
+    0.45
+  ],
+  "image_std": [
+    0.22
+  ],
+  "resample": 3,
+  "size": 224
+}
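
With do_resize and do_normalize both set to false, this feature extractor does no preprocessing by itself; in train.py the grayscale conversion and the mean/std normalization (0.45 / 0.22) are applied with torchvision instead. A minimal sketch of that pipeline for a single image, with a hypothetical input path:

from PIL import Image
from torchvision.transforms import Compose, Normalize, ToTensor

# Mirror the pipeline from train.py: grayscale -> tensor in [0, 1] -> normalize.
preprocess = Compose([ToTensor(), Normalize(mean=[0.45], std=[0.22])])

image = Image.open("leaf.jpg").convert("L")    # hypothetical input file
pixel_values = preprocess(image).unsqueeze(0)  # shape (1, 1, H, W), ready for the model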
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fed11a151a68d9df16a542bac37db8525a7832fdf78c3bb56c3b2a409a717e2f
+size 761689
train.py ADDED
@@ -0,0 +1,211 @@
+import logging
+import os
+import sys
+from dataclasses import dataclass, field
+from typing import Optional
+
+import datasets
+import numpy as np
+import torch
+import transformers
+from torchvision.transforms import Compose, Normalize, ToTensor
+from transformers import (
+    ConvNextFeatureExtractor,
+    HfArgumentParser,
+    ResNetConfig,
+    ResNetForImageClassification,
+    Trainer,
+    TrainingArguments,
+)
+from transformers.utils import check_min_version
+from transformers.utils.versions import require_version
+
+
+@dataclass
+class DataTrainingArguments:
+    """
+    Arguments pertaining to what data we are going to input our model for training and eval.
+    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify
+    them on the command line.
+    """
+
+    train_val_split: Optional[float] = field(
+        default=0.15, metadata={"help": "Percent to split off of train for validation."}
+    )
+    max_train_samples: Optional[int] = field(
+        default=None,
+        metadata={
+            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+            "value if set."
+        },
+    )
+    max_eval_samples: Optional[int] = field(
+        default=None,
+        metadata={
+            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
+            "value if set."
+        },
+    )
+
+
+def collate_fn(examples):
+    pixel_values = torch.stack([example["pixel_values"] for example in examples])
+    labels = torch.tensor([example["labels"] for example in examples])
+    return {"pixel_values": pixel_values, "labels": labels}
+
+
+# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
+check_min_version("4.19.0.dev0")
+
+require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
+
+logger = logging.getLogger(__name__)
+
+
+def main():
+    parser = HfArgumentParser((DataTrainingArguments, TrainingArguments))
+    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+        # If we pass only one argument to the script and it's the path to a json file,
+        # let's parse it to get our arguments.
+        data_args, training_args = parser.parse_json_file(
+            json_file=os.path.abspath(sys.argv[1])
+        )
+    else:
+        data_args, training_args = parser.parse_args_into_dataclasses()
+
+    # Setup logging
+    logging.basicConfig(
+        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+        datefmt="%m/%d/%Y %H:%M:%S",
+        handlers=[logging.StreamHandler(sys.stdout)],
+    )
+
+    log_level = training_args.get_process_log_level()
+    logger.setLevel(log_level)
+    transformers.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.enable_default_handler()
+    transformers.utils.logging.enable_explicit_format()
+
+    # Log on each process the small summary:
+    logger.warning(
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+    )
+
+    dataset = datasets.load_dataset("beans")
+
+    # Only split off a validation set if the dataset does not already ship one.
+    data_args.train_val_split = (
+        None if "validation" in dataset.keys() else data_args.train_val_split
+    )
+    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
+        split = dataset["train"].train_test_split(data_args.train_val_split)
+        dataset["train"] = split["train"]
+        dataset["validation"] = split["test"]
+
+    feature_extractor = ConvNextFeatureExtractor(
+        do_resize=False, do_normalize=False, image_mean=[0.45], image_std=[0.22]
+    )
+
+    config = ResNetConfig(
+        num_channels=1,
+        layer_type="basic",
+        depths=[2, 2],
+        hidden_sizes=[32, 64],
+        num_labels=3,
+    )
+
+    model = ResNetForImageClassification(config)
+
+    # Define torchvision transforms to be applied to each image.
+    normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
+    _transforms = Compose([ToTensor(), normalize])
+
+    def transforms(example_batch):
+        """Apply _transforms across a batch."""
+        # Convert to black and white (single channel).
+        example_batch["pixel_values"] = [_transforms(pil_img.convert("L")) for pil_img in example_batch["image"]]
+        return example_batch
+
+    # Load the accuracy metric from the datasets package
+    metric = datasets.load_metric("accuracy")
+
+    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
+    # predictions and label_ids field) and has to return a dictionary string to float.
+    def compute_metrics(p):
+        """Computes accuracy on a batch of predictions"""
+        accuracy = metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
+        return accuracy
+
+    if training_args.do_train:
+        if data_args.max_train_samples is not None:
+            dataset["train"] = (
+                dataset["train"]
+                .shuffle(seed=training_args.seed)
+                .select(range(data_args.max_train_samples))
+            )
+        logger.info("Setting train transform")
+        # Set the training transforms
+        dataset["train"].set_transform(transforms)
+
+    if training_args.do_eval:
+        if "validation" not in dataset:
+            raise ValueError("--do_eval requires a validation dataset")
+        if data_args.max_eval_samples is not None:
+            dataset["validation"] = (
+                dataset["validation"]
+                .shuffle(seed=training_args.seed)
+                .select(range(data_args.max_eval_samples))
+            )
+        logger.info("Setting validation transform")
+        # Set the validation transforms
+        dataset["validation"].set_transform(transforms)
+
+    print(dataset)
+
+    # Rebuild TrainingArguments with values hard-coded for this tiny run.
+    training_args = transformers.TrainingArguments(
+        output_dir=training_args.output_dir,
+        do_eval=training_args.do_eval,
+        do_train=training_args.do_train,
+        logging_steps=500,
+        eval_steps=500,
+        save_steps=500,
+        remove_unused_columns=False,  # we need to pass the `label` and `image` columns
+        per_device_train_batch_size=32,
+        save_total_limit=2,
+        evaluation_strategy="steps",
+        num_train_epochs=6,
+    )
+
+    logger.info(f"Training/evaluation parameters {training_args}")
+
+    trainer = Trainer(
+        model=model,
+        args=training_args,
+        train_dataset=dataset["train"] if training_args.do_train else None,
+        eval_dataset=dataset["validation"] if training_args.do_eval else None,
+        compute_metrics=compute_metrics,
+        tokenizer=feature_extractor,
+        data_collator=collate_fn,
+    )
+
+    # Training
+    if training_args.do_train:
+        train_result = trainer.train()
+        trainer.save_model()
+        trainer.log_metrics("train", train_result.metrics)
+        trainer.save_metrics("train", train_result.metrics)
+        trainer.save_state()
+
+    # Evaluation
+    if training_args.do_eval:
+        metrics = trainer.evaluate()
+        trainer.log_metrics("eval", metrics)
+        trainer.save_metrics("eval", metrics)
+
+
+if __name__ == "__main__":
+    main()
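
The script parses DataTrainingArguments together with the full set of TrainingArguments, but main() then rebuilds TrainingArguments with hard-coded values (batch size 32, 6 epochs, eval/save every 500 steps), so --output_dir, --do_train and --do_eval are the flags that matter most. A minimal launch sketch, assuming the file is saved as train.py:

import subprocess
import sys

# Run the training script with the flags it actually honours; everything else is set inside main().
subprocess.run(
    [sys.executable, "train.py", "--output_dir", "out", "--do_train", "--do_eval"],
    check=True,
)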
train_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 6.0,
+    "train_loss": 0.9793546272046638,
+    "train_runtime": 46.4278,
+    "train_samples_per_second": 133.627,
+    "train_steps_per_second": 4.265
+}
trainer_state.json ADDED
@@ -0,0 +1,25 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 6.0,
+  "global_step": 198,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 6.0,
+      "step": 198,
+      "total_flos": 1708758414000000.0,
+      "train_loss": 0.9793546272046638,
+      "train_runtime": 46.4278,
+      "train_samples_per_second": 133.627,
+      "train_steps_per_second": 4.265
+    }
+  ],
+  "max_steps": 198,
+  "num_train_epochs": 6,
+  "total_flos": 1708758414000000.0,
+  "trial_name": null,
+  "trial_params": null
+}
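
The recorded step count is consistent with the standard beans training split: assuming 1,034 training images (per the public dataset card) and the per-device batch size of 32 hard-coded in train.py, each epoch is ceil(1034 / 32) = 33 optimizer steps, so 6 epochs give the 198 steps reported as global_step and max_steps. A quick sanity check:

import math

train_images = 1034  # size of the beans "train" split (assumption from the dataset card)
batch_size = 32      # per_device_train_batch_size from train.py
epochs = 6

steps_per_epoch = math.ceil(train_images / batch_size)  # 33
print(steps_per_epoch * epochs)                         # 198, matching global_step / max_steps above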
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7263e66f73eef0346825fd1736d362a11c375b16f528efa9b6d6c32e654631d4
+size 3247