Andron00e committed
Commit 336e883 (parent: 5107e88)

Update README.md

Files changed (1): README.md (+20 -2)
The first hunk rewrites the usage example as a self-contained Google Colab demo: it installs the dependencies, clones the repo, and swaps `AutoModelForImageClassification` for the custom `CLIPForImageClassification` class, loaded with `10` as its second argument (presumably the ten CIFAR-10 classes).

````diff
@@ -252,13 +252,21 @@ The following hyperparameters were used during training:
 
 ### Example of usage
 
+Simple demo for Google Colab
+
 ```python
-from datasets import load_dataset
+!pip install datasets transformers[torch] accelerate -U
+!git clone https://github.com/Andron00e/CLIPForImageClassification
+%cd CLIPForImageClassification/clip_for_classification
+
+import torch
 from transformers import TrainingArguments
+from datasets import load_dataset, load_metric
 from transformers import CLIPProcessor, AutoModelForImageClassification
+from modeling_clipforimageclassification import CLIPForImageClassification
 
 processor = CLIPProcessor.from_pretrained("Andron00e/CLIPForImageClassification-v1")
-model = AutoModelForImageClassification.from_pretrained("Andron00e/CLIPForImageClassification-v1")
+model = CLIPForImageClassification.from_pretrained("Andron00e/CLIPForImageClassification-v1", 10)
 
 dataset = load_dataset("Andron00e/CIFAR10-custom")
 dataset = dataset["train"].train_test_split(test_size=0.2)
````
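The header of the next hunk shows that the lines elided here build a `DatasetDict` from a further split named `val_test`. That code never appears in the diff, so the following is only a sketch of the usual pattern; the `"valid"` key and the 50/50 ratio are assumptions, while `train_test_split`, the `val_test` name, and the `DatasetDict` keys are confirmed by the hunk context:

```python
from datasets import DatasetDict

# Hypothetical reconstruction of the elided three-way split; the "valid"
# key and test_size=0.5 are assumptions, not shown in the diff.
val_test = dataset["test"].train_test_split(test_size=0.5)
dataset = DatasetDict({
    "train": dataset["train"],
    "valid": val_test["train"],
    "test": val_test["test"],
})
```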
The second hunk adds the previously missing `classes` mapping from CIFAR-10 label ids to the class names that the processor receives as text input:

````diff
@@ -271,6 +279,8 @@ dataset = DatasetDict({
     "test": val_test["test"],
 })
 
+classes = {0: "airplane", 1: "automobile", 2: "bird", 3: "cat", 4: "deer", 5: "dog", 6: "frog", 7: "horse", 8: "ship", 9: "truck"}
+
 def transform(example_batch):
     inputs = processor(text=[classes[x] for x in example_batch['labels']], images=[x for x in example_batch['image']], padding=True, return_tensors='pt')
     inputs['labels'] = example_batch['labels']
````
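The evaluation hunk below references `processed_dataset`, which no hunk defines. Assuming the README attaches the transform lazily with `with_transform` (an assumption; only the `processed_dataset` name is confirmed by the diff context), the missing step would look like:

```python
# Assumption: processed_dataset is built by attaching the batch transform
# on the fly; only the name processed_dataset appears in the diff.
processed_dataset = dataset.with_transform(transform)
```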
The third hunk adds an accuracy metric and the `compute_metrics` callback used by the `Trainer`:

````diff
@@ -284,6 +294,11 @@ def collate_fn(batch):
         'labels': torch.tensor([x['labels'] for x in batch])
     }
 
+metric = load_metric("accuracy")
+
+def compute_metrics(p):
+    return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
+
 training_args = TrainingArguments(
     output_dir="./outputs",
     per_device_train_batch_size=16,
````
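Two caveats about the added metric code. `compute_metrics` calls `np.argmax`, but no hunk adds `import numpy as np`, so the demo raises a `NameError` unless numpy is imported in an elided line. Also, `datasets.load_metric` is deprecated and removed in recent `datasets` releases; the `evaluate` package is the drop-in replacement. A sketch of both fixes:

```python
import numpy as np  # required by compute_metrics; never imported in this diff

# Drop-in replacement for the deprecated datasets.load_metric:
import evaluate

metric = evaluate.load("accuracy")

def compute_metrics(p):
    # p.predictions holds logits; accuracy expects hard class predictions
    return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
```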
The last hunk leaves the evaluation calls untouched and only steps back out of the cloned repo at the end of the notebook:

````diff
@@ -322,4 +337,7 @@ trainer.save_state()
 metrics = trainer.evaluate(processed_dataset['test'])
 trainer.log_metrics("eval", metrics)
 trainer.save_metrics("eval", metrics)
+
+%cd ..
+%cd ..
 ```
````
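The two trailing `%cd ..` magics step back out of `CLIPForImageClassification/clip_for_classification` to the notebook's starting directory; in IPython they collapse to a single magic:

```python
# Equivalent single step back to the Colab root:
%cd ../..
```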
 