Commit f7b8e0e, committed by BhumikaMak
Parent(s): fa45d53

Fix: incorrect path

Files changed:
- __pycache__/yolov5.cpython-312.pyc +0 -0
- __pycache__/yolov8.cpython-312.pyc +0 -0
- app.py +2 -41
__pycache__/yolov5.cpython-312.pyc ADDED
Binary file (4.67 kB)

__pycache__/yolov8.cpython-312.pyc ADDED
Binary file (4.49 kB)
app.py CHANGED

@@ -23,45 +23,9 @@ def process_image(image, yolo_versions=["yolov5"]):
 
 
 sample_images = {
-    "Sample 1": os.path.join(os.getcwd(), "
-    "Sample 2": os.path.join(os.getcwd(), "
+    "Sample 1": os.path.join(os.getcwd(), "data/xai/sample1.jpeg"),
+    "Sample 2": os.path.join(os.getcwd(), "data/xai/sample2.jpg"),
 }
-
-
-"""
-interface = gr.Interface(
-    fn=process_image,
-    inputs=[
-        gr.Image(type="pil", label="Upload an Image"),
-        gr.CheckboxGroup(
-            choices=["yolov5", "yolov8s"],
-            value=["yolov5"],  # Set the default value (YOLOv5 checked by default)
-            label="Select Model(s)",
-        ),
-        gr.Dropdown(
-            choices=list(sample_images.keys()),
-            label="Select a Sample Image",
-            type="value",
-            interactive=True,
-        ),
-    ],
-    outputs=gr.Gallery(label="Results", elem_id="gallery", rows=2, height=500),
-    title="Visualising the key image features that drive decisions with our explainable AI tool.",
-    description="XAI: Upload an image or select a sample to visualize object detection of your models.",
-)
-
-def main_logic(uploaded_image, selected_models, sample_selection):
-    # If the user selects a sample image, use that instead of the uploaded one
-    if sample_selection:
-        image = load_sample(sample_selection)
-    else:
-        image = uploaded_image
-
-    # Call the processing function
-    return process_image(image, selected_models)
-
-interface.launch()
-"""
 def load_sample_image(sample_name):
     if sample_name in sample_images:
         try:
@@ -71,9 +35,6 @@ def load_sample_image(sample_name):
         return None
     return None
 
-
-
-# Gradio interface
 with gr.Blocks() as interface:
     gr.Markdown("# Visualizing Key Features with Explainable AI")
     gr.Markdown("Upload an image or select a sample image to visualize object detection.")