Update app.py
app.py CHANGED
@@ -28,15 +28,15 @@ class_labels = [
     "Vest", "Underwear"
 ]

-#
+# **Predefined Default Images**
 default_images = {
     "T-Shirt": "tshirt.jpg",
-    "Jacket": "
-    "Sweater": "
-    "Dress": "
+    "Jacket": "jacket.jpg",
+    "Sweater": "sweater.webp",
+    "Dress": "dress.jpg"
 }

-#
+# **Image Preprocessing Function**
 def preprocess_image(image):
     """Applies necessary transformations to the input image."""
     transform = transforms.Compose([
@@ -47,7 +47,7 @@ def preprocess_image(image):
     ])
     return transform(image).unsqueeze(0).to(device)

-#
+# **Classification Function**
 def classify_image(selected_default, uploaded_image):
     """Processes either a default or uploaded image and returns the predicted clothing category."""

@@ -69,7 +69,7 @@ def classify_image(selected_default, uploaded_image):
         with torch.no_grad():
             output = model(image)

-        #
+        # Ensure output is a tensor (handle tuple case)
         if isinstance(output, tuple):
             output = output[1]  # Extract the actual output tensor

@@ -99,7 +99,7 @@ def classify_image(selected_default, uploaded_image):
         print(f"[ERROR] Exception during classification: {e}")
         return "Error in classification. Check console for details."

-#
+# **Gradio Interface**
 with gr.Blocks() as interface:
     gr.Markdown("# Clothing1M Image Classifier")
     gr.Markdown("Upload a clothing image or select from the predefined images below.")
@@ -127,7 +127,7 @@ with gr.Blocks() as interface:
         outputs=output_text
     )

-#
+# **Run the Interface**
 if __name__ == "__main__":
     print("[INFO] Launching Gradio interface...")
     interface.launch()
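The hunks above skip the body of the transform pipeline (app.py lines 43-46), so the exact resize and normalization settings are not visible in this diff. A minimal sketch of what preprocess_image commonly looks like for an ImageNet-pretrained backbone follows; the 224x224 input size and the normalization statistics are assumptions, not values taken from the commit.

import torch
from torchvision import transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def preprocess_image(image):
    """Applies necessary transformations to the input image."""
    transform = transforms.Compose([
        transforms.Resize((224, 224)),                     # assumed input size
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],   # ImageNet statistics (assumed)
                             std=[0.229, 0.224, 0.225]),
    ])
    return transform(image).unsqueeze(0).to(device)        # add batch dimension, move to device

If the underlying model was trained with different input dimensions or statistics, the Resize and Normalize values would need to match it.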
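The prediction step between the tuple check and the error handler (lines 76-98) is likewise elided. One common way to turn the extracted output tensor into a label from class_labels is sketched below; the softmax/argmax logic, the confidence formatting, and the helper name predict_label are illustrative assumptions rather than the code actually in app.py.

import torch
import torch.nn.functional as F

def predict_label(model, image, class_labels):
    """Illustrative helper: map the highest-scoring logit to a class name."""
    with torch.no_grad():
        output = model(image)

    # Ensure output is a tensor (handle tuple case), mirroring the commit's check
    if isinstance(output, tuple):
        output = output[1]  # extract the actual output tensor

    probs = F.softmax(output, dim=1)              # logits -> probabilities
    confidence, predicted_idx = probs.max(dim=1)  # best class for the batch of one
    return f"{class_labels[predicted_idx.item()]} ({confidence.item():.2%})"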
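The component layout between the two Markdown calls and the outputs=output_text line (lines 106-126) is also not part of the visible hunks. The self-contained sketch below shows one way such a Blocks interface is typically wired; the component names default_choice, image_input, and classify_btn plus the stubbed classify_image are illustrative, while default_images, the Markdown text, output_text, and interface.launch() come from the diff itself.

import gradio as gr

# Values taken from the diff; classify_image is stubbed here purely for illustration.
default_images = {"T-Shirt": "tshirt.jpg", "Jacket": "jacket.jpg",
                  "Sweater": "sweater.webp", "Dress": "dress.jpg"}

def classify_image(selected_default, uploaded_image):
    return f"(stub) default={selected_default}, uploaded={uploaded_image is not None}"

with gr.Blocks() as interface:
    gr.Markdown("# Clothing1M Image Classifier")
    gr.Markdown("Upload a clothing image or select from the predefined images below.")

    default_choice = gr.Dropdown(choices=list(default_images.keys()),
                                 label="Predefined image")
    image_input = gr.Image(type="pil", label="Or upload your own")
    classify_btn = gr.Button("Classify")
    output_text = gr.Textbox(label="Prediction")

    classify_btn.click(
        fn=classify_image,
        inputs=[default_choice, image_input],
        outputs=output_text
    )

if __name__ == "__main__":
    interface.launch()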