import os
import gradio as gr
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from ultralytics import YOLO
image_directory = '/home/user/app/flat_images'
img_files = [file for file in os.listdir(image_directory)
             if file.lower().endswith(('.jpg', '.png'))]
path = [os.path.join(image_directory, filename) for filename in img_files]
model = YOLO('/home/user/app/best.pt')
inputs_image = [
    gr.components.Image(type="filepath", label="Input Image"),
]
outputs = [
    gr.components.Textbox(label="Комнатность"),  # "Number of rooms"
    gr.components.Image(type="numpy", label="Визуализация"),  # "Visualization"
]
def show_preds_image(image_path):
    # Run inference and draw the predicted boxes, labels and confidences on the image.
    # Ultralytics returns a BGR array, so convert it to RGB for display in Gradio.
    results = model(image_path)
    result = results[0]
    im_array = result.plot(conf=True, labels=True, boxes=True)
    im_rgb = cv2.cvtColor(im_array, cv2.COLOR_BGR2RGB)
    # Each detected box corresponds to one room; no detections means the flat is a studio.
    bedrooms = len(result.boxes)
    if bedrooms == 0:
        bedrooms = 'Студия'  # "Studio"
    return bedrooms, im_rgb
# for result in results:
#     # Extract bounding boxes from the result. 'boxes.data' is a tensor containing the boxes.
#     # Convert tensor to numpy array and move to CPU if necessary
#     boxes_data = result.boxes.data.cpu().numpy()
#     # Extract class names from the result, assuming 'result.names' is a list of class names.
#     class_names = result.names
#     # For each bounding box, draw a rectangle and label on the image
#     for box_data in boxes_data:
#         # Extract data
#         x1, y1, x2, y2, confidence, class_id = box_data
#         # Ensure class_id is an integer for indexing
#         class_id = int(class_id)
#         width = x2 - x1
#         height = y2 - y1
#         # Draw a rectangle on the image
#         rect = patches.Rectangle(
#             (x1, y1), width, height, linewidth=1, edgecolor='r', facecolor='none')
#         ax.add_patch(rect)
#         # If class names are available, display the class label on the image
#         if class_names:
#             # Include confidence score in the label
#             label = f"{class_names[class_id]}: {confidence:.2f}"
#             plt.text(x1, y1, label, color='white',
#                      bbox=dict(facecolor='red', alpha=0.5))
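
# The commented-out block above sketches manual box drawing with matplotlib.
# Below is a self-contained version of that idea as a helper function, kept only
# for reference: it is not wired into the Gradio interface, and the name
# `draw_boxes_matplotlib` is an assumption, not part of the original app.
def draw_boxes_matplotlib(image_path):
    results = model(image_path)
    result = results[0]
    # Read the image with OpenCV (BGR) and convert to RGB for matplotlib display.
    img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
    fig, ax = plt.subplots()
    ax.imshow(img)
    # result.boxes.data is an (N, 6) tensor: x1, y1, x2, y2, confidence, class_id.
    boxes_data = result.boxes.data.cpu().numpy()
    class_names = result.names  # mapping of class id -> class name
    for x1, y1, x2, y2, confidence, class_id in boxes_data:
        rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1,
                                 linewidth=1, edgecolor='r', facecolor='none')
        ax.add_patch(rect)
        label = f"{class_names[int(class_id)]}: {confidence:.2f}"
        ax.text(x1, y1, label, color='white', bbox=dict(facecolor='red', alpha=0.5))
    return fig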
interface_image = gr.Interface(
    fn=show_preds_image,
    inputs=inputs_image,
    outputs=outputs,
    title="Floor Plan Detector",
    examples=path,
    cache_examples=False,
)
gr.TabbedInterface(
    [interface_image],
    tab_names=['Image Inference'],
).queue().launch(debug=True)