from fastai.vision.all import *
from io import BytesIO
import requests
import streamlit as st
import os
import random
import numpy as np
import torch
import cv2
from models.experimental import attempt_load
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.plots import plot_one_box
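
# NOTE: models.experimental, utils.general, and utils.plots are assumed to be
# vendored from the YOLOv7 repository (https://github.com/WongKinYiu/yolov7)
# next to this app; they are not pip-installable packages.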
# Function to randomly choose a default image
def choose_default_image():
    default_images_path = "./default_images"  # Path to folder containing default images
    default_images = os.listdir(default_images_path)
    default_image_path = os.path.join(default_images_path, random.choice(default_images))
    return default_image_path
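
# Assumption: ./default_images exists and contains at least one image file;
# random.choice raises IndexError on an empty folder.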
# Function to run detection on a randomly chosen default image
def detect_default_image(model, imgsz=640, conf_thres=0.25, iou_thres=0.45):
    default_image_path = choose_default_image()
    img = PILImage.create(default_image_path)
    detect_modify(img, model, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
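
# Worked example (illustrative): a 720x1280 (h x w) frame letterboxed to 640 with stride 32.
# r = min(640/720, 640/1280) = 0.5, so new_unpad = (640, 360) and dw, dh = 0, 280.
# With auto=True, dh = 280 % 32 = 24, split as 12 px of gray padding on top and bottom,
# yielding a 640x384 input whose spatial dims are multiples of the model stride.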
def detect_modify(img0, model, imgsz=640, conf_thres=0.25, iou_thres=0.45):
    st.image(img0, caption="Your image", use_column_width=True)

    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size

    # Padded resize
    img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_RGB2BGR)
    img = letterbox(img0, imgsz, stride=stride)[0]

    # Convert
    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
    img = np.ascontiguousarray(img)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    # Prepare input tensor
    img = torch.from_numpy(img).to(device)
    img = img.float() / 255.0  # 0 - 255 to 0.0 - 1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)  # add batch dimension

    # Inference
    with torch.no_grad():  # calculating gradients would cause a GPU memory leak
        pred = model(img)[0]

    # Apply NMS
    pred = non_max_suppression(pred, conf_thres, iou_thres)

    # Process detections (single image, so only the first entry matters)
    det = pred[0]
    s = ''
    if len(det):
        # Rescale boxes from img_size to img0 size
        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()

        # Summarize results per class
        for c in det[:, -1].unique():
            n = int((det[:, -1] == c).sum())  # detections per class
            s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

        # Draw results
        for *xyxy, conf, cls in reversed(det):
            label = f'{names[int(cls)]} {conf:.2f}'
            plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=1)

    st.markdown("### Prediction result:")
    if s:
        st.write(s.rstrip(', '))
    img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_BGR2RGB)
    st.image(img0, caption="Prediction Result", use_column_width=True)
# Set parameters
weight_path = './best.pt'
imgsz = 640
conf_thres = 0.25
iou_thres = 0.45
device = torch.device("cpu")

# Load model
model = attempt_load(weight_path, map_location=device)  # load FP32 model
"""
# Rat Detection using YOLOv7
Rats are common pests in urban and rural environments, posing threats to public health and causing damage to property. Effective rat detection is crucial for pest control and management. However, manual rat detection can be time-consuming and labor-intensive. Therefore, we developed an object detection model using YOLOv7 specifically tailored for rat detection. This model aims to automate the process of rat detection, making it faster, more efficient, and accessible.
Usage Instructions:
1.Upload Image: Users can upload their own images containing rats for detection.
2.Image URL: Users can input the URL of an image containing rats for detection.
3.Use Random Default Image: Users can select a default image provided by the system for detection. The system will randomly choose one of the default images and perform detection on it.
Upon selecting an option, the model will perform rat detection on the chosen image and display the results, including bounding boxes around detected rats. Users can then analyze the results to identify rat presence in the image.
"""
# Streamlit app UI
option = st.radio("Choose an image source:", ["Upload Image", "Image URL", "Use Random Default Image"])

if option == "Upload Image":
    uploaded_file = st.file_uploader("Please upload an image.")
    if uploaded_file is not None:
        img = PILImage.create(uploaded_file)
        detect_modify(img, model, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
elif option == "Image URL":
    url = st.text_input("Please input a URL.")
    if url != "":
        try:
            response = requests.get(url)
            pil_img = PILImage.create(BytesIO(response.content))
            detect_modify(pil_img, model, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
        except Exception:
            st.text(f"Problem reading image from {url}")
else:
    detect_default_image(model, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
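
# To try this locally (assuming streamlit, fastai, torch, and opencv-python are installed,
# the YOLOv7 utils are vendored as noted above, and a trained ./best.pt checkpoint exists):
#   streamlit run app.py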