"""Streamlit app that runs YOLOv9 segmentation and detection on an uploaded image
and reports whether each detected bounding box is intersecting, obstructed
(close to), or not touching the class-0 "rail zone" segment."""

import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
from shapely.geometry import Polygon, box as shapely_box


def extract_class_0_coordinates(filename):
    """Return a flat list of normalized polygon coordinates from the class-0
    lines of a YOLO segmentation label file."""
    class_0_coordinates = []

    with open(filename, 'r') as file:
        for line in file:
            parts = line.strip().split()
            if len(parts) == 0:
                continue

            # First token is the class id; keep only class 0 (the rail zone).
            if parts[0] == '0':
                coordinates = [float(x) for x in parts[1:]]
                class_0_coordinates.extend(coordinates)

    return class_0_coordinates
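
# A segmentation label line, as parsed above, looks like (values illustrative):
#   0 0.12 0.81 0.35 0.55 0.61 0.52 ...
# i.e. a class id followed by normalized x, y pairs tracing the polygon outline.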


def run_yolo_models1(img):
    """Run the YOLOv9 segmentation and detection models on the image at `img`,
    writing label .txt files under runs/ via --save-txt."""
    # Segmentation model: produces the class-0 rail-zone polygon.
    os.system(
        f"python segment/predict.py --source {img} --img 640 --device cpu "
        "--weights models/segment/best-2.pt --name yolov9_c_640_detect "
        "--exist-ok --save-txt"
    )
    # Detection model: produces the object bounding boxes.
    os.system(
        f"python detect.py --source {img} --img 640 --device cpu "
        "--weights models/detect/yolov9-s-converted.pt --name yolov9_c_640_detect "
        "--exist-ok --save-txt"
    )


def parse_yolo_box(box_string):
    """Parse a YOLO-format box line into (class_id, x_center, y_center, width, height).

    Any extra trailing values (e.g. a saved confidence score) are ignored.
    """
    values = list(map(float, box_string.split()))
    if len(values) < 5:
        raise ValueError(f"Expected at least 5 values, got {len(values)}")
    return values[0], values[1], values[2], values[3], values[4]


def read_yolo_boxes(file_path):
    """Read YOLO format bounding boxes from a file."""
    with open(file_path, 'r') as f:
        return [parse_yolo_box(line.strip()) for line in f if line.strip()]
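
# A detection label line, as read above, has the form
#   "<class> <x_center> <y_center> <width> <height>"
# with the box values normalized to [0, 1] relative to the image size.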


def yolo_to_pixel_coord(x, y, img_width, img_height):
    """Convert a single normalized YOLO (x, y) coordinate to pixel coordinates."""
    return int(x * img_width), int(y * img_height)


def yolo_to_pixel_coords(x_center, y_center, width, height, img_width, img_height):
    """Convert a normalized YOLO box (center and size) to pixel corners (x1, y1, x2, y2)."""
    x1 = int((x_center - width / 2) * img_width)
    y1 = int((y_center - height / 2) * img_height)
    x2 = int((x_center + width / 2) * img_width)
    y2 = int((y_center + height / 2) * img_height)
    return x1, y1, x2, y2
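
# Worked example (hypothetical numbers): on a 640x480 image,
# yolo_to_pixel_coords(0.5, 0.5, 0.2, 0.1, 640, 480) returns (256, 216, 384, 264),
# i.e. a 128 x 48 px box centered at (320, 240).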


def box_segment_relationship(yolo_box, segment, img_width, img_height, threshold):
    """Classify a bounding box against the segmented area.

    Returns "intersecting" if the box overlaps the segment polygon,
    "obstructed" if it comes within `threshold` pixels of it, and
    "not touching" otherwise.
    """
    class_id, x_center, y_center, width, height = yolo_box
    x1, y1, x2, y2 = yolo_to_pixel_coords(x_center, y_center, width, height, img_width, img_height)
    pixel_segment = convert_segment_to_pixel(segment, img_width, img_height)
    # Interleaved [x1, y1, x2, y2, ...] -> (x, y) pairs for the polygon.
    segment_polygon = Polygon(list(zip(pixel_segment[::2], pixel_segment[1::2])))
    box_polygon = shapely_box(x1, y1, x2, y2)

    if box_polygon.intersects(segment_polygon):
        return "intersecting"
    elif box_polygon.distance(segment_polygon) <= threshold:
        return "obstructed"
    else:
        return "not touching"


def convert_segment_to_pixel(segment, img_width, img_height):
    """Convert segment coordinates from YOLO format to pixel coordinates."""
    pixel_segment = []
    for i in range(0, len(segment), 2):
        x, y = yolo_to_pixel_coord(segment[i], segment[i + 1], img_width, img_height)
        pixel_segment.extend([x, y])
    return pixel_segment


def plot_boxes_and_segment(image, yolo_boxes, segment, img_width, img_height, threshold):
    """Plot the image with intersecting boxes, obstructed boxes, and the segment."""
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

    # Draw the segment outline, closing the polygon back to its first vertex.
    pixel_segment = convert_segment_to_pixel(segment, img_width, img_height)
    ax.plot(pixel_segment[::2] + [pixel_segment[0]], pixel_segment[1::2] + [pixel_segment[1]],
            'g-', linewidth=2, label='Rail Zone')

    colors = {'intersecting': 'r', 'obstructed': 'y', 'not touching': 'b'}
    labels = {'intersecting': 'Intersecting Box', 'obstructed': 'Obstructed Box', 'not touching': 'Non-interacting Box'}

    for yolo_box in yolo_boxes:
        class_id, x_center, y_center, width, height = yolo_box
        x1, y1, x2, y2 = yolo_to_pixel_coords(x_center, y_center, width, height, img_width, img_height)
        relationship = box_segment_relationship(yolo_box, segment, img_width, img_height, threshold)
        color = colors[relationship]
        label = labels[relationship]
        ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor=color, linewidth=2, label=label))

    # Deduplicate legend entries: every drawn box adds its own label otherwise.
    handles, legend_labels = ax.get_legend_handles_labels()
    by_label = dict(zip(legend_labels, handles))
    ax.legend(by_label.values(), by_label.keys())
    ax.axis('off')
    plt.tight_layout()
    return fig


def main():
    st.title("YOLO Analysis App")

    uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png"])
    if uploaded_file is not None:
        # Decode the uploaded bytes into a BGR image (OpenCV's default channel order).
        image = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), 1)
        st.image(image, caption='Uploaded Image', channels="BGR", use_column_width=True)

        if st.button('Run Analysis'):
            with st.spinner("Running detection..."):
                img_height, img_width = image.shape[:2]

                # Write the image to disk so the YOLO scripts can read it.
                temp_image_path = "temp_image.jpg"
                cv2.imwrite(temp_image_path, image)

                run_yolo_models1(temp_image_path)

                # Label files written by the two runs above (--save-txt).
                label_path = 'runs/predict-seg/yolov9_c_640_detect/labels/temp_image.txt'
                label_path2 = 'runs/detect/yolov9_c_640_detect/labels/temp_image.txt'

                segment = extract_class_0_coordinates(label_path)
                yolo_boxes = read_yolo_boxes(label_path2)

                # Pixel distance within which a non-overlapping box counts as "obstructed".
                threshold = 10

                fig = plot_boxes_and_segment(image, yolo_boxes, segment, img_width, img_height, threshold)
                st.pyplot(fig)

                st.subheader("Analysis Results:")
                for yolo_box in yolo_boxes:
                    result = box_segment_relationship(yolo_box, segment, img_width, img_height, threshold)
                    st.write(f"Box {yolo_box}: {result}.")

                # Clean up the temporary image and label files.
                os.remove(temp_image_path)
                os.remove(label_path)
                os.remove(label_path2)


if __name__ == "__main__":
    main()