bharathj16 committed on
Commit 99947f3
1 Parent(s): c67987a

Update app.py

Files changed (1)
  1. app.py +10 -25
app.py CHANGED
@@ -1,31 +1,16 @@
- from transformers import AutoImageProcessor, DPTForDepthEstimation
- import torch
- import numpy as np
  from PIL import Image
  import requests

- url = "http://images.cocodataset.org/val2017/000000039769.jpg"
  image = Image.open(requests.get(url, stream=True).raw)

- image_processor = AutoImageProcessor.from_pretrained("facebook/dpt-dinov2-small-nyu")
- model = DPTForDepthEstimation.from_pretrained("facebook/dpt-dinov2-small-nyu")

- # prepare image for the model
- inputs = image_processor(images=image, return_tensors="pt")
-
- with torch.no_grad():
-     outputs = model(**inputs)
-     predicted_depth = outputs.predicted_depth
-
- # interpolate to original size
- prediction = torch.nn.functional.interpolate(
-     predicted_depth.unsqueeze(1),
-     size=image.size[::-1],
-     mode="bicubic",
-     align_corners=False,
- )
-
- # visualize the prediction
- output = prediction.squeeze().cpu().numpy()
- formatted = (output * 255 / np.max(output)).astype("uint8")
- depth = Image.fromarray(formatted)
+ from transformers import ViTImageProcessor, ViTForImageClassification
  from PIL import Image
  import requests

+ url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
  image = Image.open(requests.get(url, stream=True).raw)

+ processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')
+ model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224')

+ inputs = processor(images=image, return_tensors="pt")
+ outputs = model(**inputs)
+ logits = outputs.logits
+ # model predicts one of the 1000 ImageNet classes
+ predicted_class_idx = logits.argmax(-1).item()
+ print("Predicted class:", model.config.id2label[predicted_class_idx])