# Dental X-ray analysis — Gradio app (Hugging Face Space)
import glob

import gradio as gr
from PIL import Image
from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors

# Load the trained dental-detection model and collect bundled example images.
model = YOLO('Dental_model.pt')
pic_files = glob.glob('*.jpg')
names = model.model.names  # class-id -> human-readable label mapping

# External widget embedded below the detection outputs.
cloud_sdk = '<iframe src="https://cloudhand-sdk-lmpc.vercel.app/" width="100%" height="600px" style="border:none;"></iframe>'
def detect_objects(image):
    """Run YOLO detection on *image* and return two annotated copies.

    Args:
        image: Input image (PIL image, as supplied by ``gr.Image(type="pil")``).

    Returns:
        A 2-tuple of PIL images: the first with boxes and class labels drawn,
        the second with the same boxes but no label text.
    """
    # Keep an untouched copy so we can produce a label-free variant.
    image1 = image.copy()
    results = model.predict(image)
    classes = results[0].boxes.cls.cpu().tolist()
    boxes = results[0].boxes.xyxy.cpu()
    annotator = Annotator(image, line_width=3)
    annotator1 = Annotator(image1, line_width=3)
    for box, cls in zip(boxes, classes):
        # Same color per class on both outputs; only the label text differs.
        annotator.box_label(box, label=names[int(cls)], color=colors(int(cls)))
        annotator1.box_label(box, label=None, color=colors(int(cls)))
    return Image.fromarray(annotator.result()), Image.fromarray(annotator1.result())
# Gradio Blocks App: upload/example image on the left, annotated results on the right.
with gr.Blocks() as demo:
    gr.Markdown("## Dental Analysis")
    gr.Markdown("Analyze your Dental XRAY image with our AI object Detection model")
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil", label="Upload Image")
            run_button = gr.Button("Run Detection")
            example_images = gr.Examples(
                examples=pic_files,
                inputs=image_input,
                label="Examples"
            )
        with gr.Column():
            image_output_1 = gr.Image(type="pil", label="Dental Analysis")
            image_output_2 = gr.Image(type="pil", label="Without Labels")
    html_output = gr.HTML(cloud_sdk)
    run_button.click(fn=detect_objects,
                     inputs=image_input,
                     outputs=[image_output_1, image_output_2])
if __name__ == "__main__": | |
demo.launch() | |