fix torch jit
app.py CHANGED
@@ -14,7 +14,7 @@ dino_v2_image_processor = AutoImageProcessor.from_pretrained("./dinov2-large")
 
 # Provide a sample input for tracing
 sample_input = dino_v2_image_processor(
-    images=Image.new("RGB", (
+    images=Image.new("RGB", (224, 224)), return_tensors="pt"
 ).to(torch_device)
 traced_dino_v2_model = torch.jit.trace(dino_v2_model, sample_input["pixel_values"])
 
@@ -35,15 +35,15 @@ def process_image(image):
     if image.mode != "RGB":
         image = image.convert("RGB")
 
-    # Resize to
+    # Resize to 224px while maintaining aspect ratio
     width, height = image.size
     if width < height:
-        w_percent =
-        new_width =
+        w_percent = 224 / float(width)
+        new_width = 224
         new_height = int(float(height) * float(w_percent))
     else:
-        h_percent =
-        new_height =
+        h_percent = 224 / float(height)
+        new_height = 224
         new_width = int(float(width) * float(h_percent))
     image = image.resize((new_width, new_height), Image.LANCZOS)
 
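For context on the first hunk: torch.jit.trace records the operations executed on a concrete example input, so the sample batch only needs the right shape and dtype, not meaningful pixels; a blank 224x224 RGB image run through the image processor is enough. A minimal, self-contained sketch of the same pattern (the public facebook/dinov2-large checkpoint and torchscript=True are assumptions for illustration; the Space loads a local ./dinov2-large copy):

import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModel

torch_device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical hub checkpoint standing in for the Space's local ./dinov2-large.
# torchscript=True makes the model return tuples instead of a ModelOutput,
# which keeps torch.jit.trace happy.
dino_v2_image_processor = AutoImageProcessor.from_pretrained("facebook/dinov2-large")
dino_v2_model = AutoModel.from_pretrained("facebook/dinov2-large", torchscript=True).to(torch_device).eval()

# Provide a sample input for tracing: a dummy 224x224 RGB image.
sample_input = dino_v2_image_processor(
    images=Image.new("RGB", (224, 224)), return_tensors="pt"
).to(torch_device)
traced_dino_v2_model = torch.jit.trace(dino_v2_model, sample_input["pixel_values"])

# The traced module is then called like the original model.
features = traced_dino_v2_model(sample_input["pixel_values"])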
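The second hunk's resize scales the shorter side of the image to 224px and the longer side proportionally, so the aspect ratio is preserved. The same logic as a standalone helper (the function name is mine, not from the Space):

from PIL import Image

def resize_shortest_side(image: Image.Image, target: int = 224) -> Image.Image:
    # Scale so the shorter side equals `target`, keeping the aspect ratio.
    width, height = image.size
    if width < height:
        percent = target / float(width)
        new_width, new_height = target, int(float(height) * percent)
    else:
        percent = target / float(height)
        new_width, new_height = int(float(width) * percent), target
    return image.resize((new_width, new_height), Image.LANCZOS)

# Example: a 400x300 image comes out as 298x224 (shorter side pinned to 224).
print(resize_shortest_side(Image.new("RGB", (400, 300))).size)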