from fastai.vision.all import *
from io import BytesIO
import requests
import streamlit as st
import numpy as np
import torch
import time
import cv2
from numpy import random
import sys
sys.path.append('./yolov7')  # make sure the yolov7 directory is on the Python path
from models.experimental import attempt_load
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.plots import plot_one_box
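# Overview of the detection pipeline defined below:
#   1. letterbox() resizes and pads the input to the model's expected size while keeping the aspect ratio.
#   2. detect_modify() converts the image to a normalized CHW tensor, runs the YOLOv7 model,
#      applies non-max suppression, rescales the boxes to the original image, and draws them in Streamlit.
#   3. process_video() feeds video frames through the same detect_modify() routine.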
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad an image to new_shape while preserving the aspect ratio (YOLOv7-style letterboxing).
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, never up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # width/height padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # width/height padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # split the padding between the two sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def detect_modify(img0, model, conf, imgsz=640, conf_thres=0.25, iou_thres=0.45):
    st.image(img0, caption="Your image", use_column_width=True)
    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # verify the image size is a multiple of the stride

    # Padded resize
    img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_RGB2BGR)
    img = letterbox(img0, imgsz, stride=stride)[0]

    # Convert
    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
    img = np.ascontiguousarray(img)

    # Get class names and assign a random colour to each class
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    # Run inference
    old_img_w = old_img_h = imgsz  # (unused warm-up placeholders)
    old_img_b = 1
    t0 = time.time()
    img = torch.from_numpy(img).to(device)
    img = img.float() / 255.0  # uint8 0-255 to float 0.0-1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)

    # Inference
    with torch.no_grad():  # tracking gradients here would leak GPU memory
        pred = model(img)[0]

    # Apply NMS (non-maximum suppression); the slider value `conf` is used as the confidence threshold
    pred = non_max_suppression(pred, conf, iou_thres)

    # Process detections
    gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]  # normalization gain whwh (not used below)
    det = pred[0]
    if len(det):
        # Rescale boxes from the letterboxed size back to the original image size
        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()

        # Per-class detection counts, built into a summary string
        s = ''
        for c in det[:, -1].unique():
            n = (det[:, -1] == c).sum()  # detections per class
            s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

        # Draw results
        for *xyxy, conf, cls in reversed(det):
            label = f'{names[int(cls)]} {conf:.2f}'
            plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=3)

    st.markdown("### Prediction results:")
    img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_BGR2RGB)
    st.image(img0, caption="Prediction results", use_column_width=True)
# Settings
weight_path = './best.pt'
imgsz = 640
conf = 0.6
conf_thres = 0.25
iou_thres = 0.45
device = torch.device("cpu")
path = "./"

# Load model
model = attempt_load(weight_path, map_location=device)  # load FP32 model
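# Note: Streamlit reruns this whole script on every interaction, so the model above is
# reloaded on each rerun. If startup time becomes an issue, wrapping the load in a cached
# factory (e.g. Streamlit's st.cache_resource) is one option; this is a suggestion, not
# part of the original app.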
def process_video(video_path, model, conf, imgsz=640, conf_thres=0.25, iou_thres=0.45):
    cap = cv2.VideoCapture(video_path)
    stframe = st.empty()
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        detect_modify(frame, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
        stframe.image(frame, channels="RGB")
    cap.release()
"""
# 籃球畫面物件捕捉
使用者可以將您想測試的圖片或影片上傳,模型將會把球員、籃球與籃框這三個物件捕捉出來,並將其附上對應的框與信任指數,
而使用者也可以藉由拖曳畫面上的指標,來選擇自己想要探測的信任指數。
"""
option = st.radio("", ["Upload an image", "Image URL", "Upload a video"])
conf = st.slider("Select the confidence threshold:", min_value=0.0, max_value=1.0, value=0.6)

if option == "Upload an image":
    uploaded_file = st.file_uploader("Please upload an image.")
    if uploaded_file is not None:
        img = PILImage.create(uploaded_file)
        detect_modify(img, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
elif option == "Image URL":
    url = st.text_input("Please enter a URL.")
    if url != "":
        try:
            response = requests.get(url)
            pil_img = PILImage.create(BytesIO(response.content))
            detect_modify(pil_img, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
        except Exception:
            st.text(f"There was a problem reading the image at {url}")
elif option == "Upload a video":
    uploaded_video = st.file_uploader("Please upload a video.", type=["mp4", "avi", "mov", "mkv"])
    if uploaded_video is not None:
        video_path = uploaded_video.name
        with open(video_path, mode='wb') as f:
            f.write(uploaded_video.read())
        process_video(video_path, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)