# -*- coding:utf-8 -*-
import cv2
import numpy as np
import torch
from PIL import Image
from ultralytics import YOLO

from torchvision import transforms
import models
from scipy.spatial.distance import euclidean
from sklearn.preprocessing import normalize
from util.FeatureExtractor import FeatureExtractor
from util.utils import img_to_tensor
import os


def pool2d(tensor, type='max'):
    """Pool a (1, C, H, W) feature map into 8 horizontal stripe vectors.

    The map is split into 8 horizontal stripes; each stripe is pooled over
    its full width and stripe height, yielding one C-dim vector per stripe
    (the local features used for AlignedReID-style stripe matching).

    Args:
        tensor: 4-D torch tensor of shape (1, C, H, W).
        type: pooling mode, 'max' or 'mean'.

    Returns:
        numpy array of shape (8, C): one pooled feature vector per stripe.

    Raises:
        ValueError: if `type` is neither 'max' nor 'mean'.
    """
    sz = tensor.size()
    # Kernel covers one of 8 horizontal stripes across the full width.
    kernel = (round(sz[2] / 8), sz[3])
    if type == 'max':
        x = torch.nn.functional.max_pool2d(tensor, kernel_size=kernel)
    elif type == 'mean':
        # Bug fix: torch has no `mean_pool2d`; average pooling is `avg_pool2d`.
        x = torch.nn.functional.avg_pool2d(tensor, kernel_size=kernel)
    else:
        # Bug fix: previously an unknown `type` left `x` unbound.
        raise ValueError("type must be 'max' or 'mean', got %r" % (type,))
    x = x[0].cpu().data.numpy()
    # (C, 8, 1) -> (1, 8, C) -> (8, C)
    x = np.transpose(x, (2, 1, 0))[0]
    return x


# ---- Re-ID model setup and target gallery ------------------------------
use_gpu = torch.cuda.is_available()
model = models.init_model(name='resnet50', num_classes=751, loss={'softmax', 'metric'}, use_gpu=use_gpu, aligned=True)
checkpoint = torch.load("log/checkpoint_ep300.pth.tar")
model.load_state_dict(checkpoint['state_dict'])
# Standard ImageNet preprocessing for the 256x128 re-ID input size.
img_transform = transforms.Compose([
    transforms.Resize((256, 128)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Hook layer '7' of the backbone and use its activations as re-ID features.
myexactor = FeatureExtractor(model, ['7'])
# Bug fix: only move the model to GPU when one is available — the original
# called .cuda() unconditionally and crashed on CPU-only machines.
if use_gpu:
    model = model.cuda()
model.eval()

aim_list = []   # gallery: one L2-normalized (8, C) stripe-feature array per target
aim_index = []  # gallery labels: the target image's file-name stem

# Build the gallery from every image in ./Aim; the file name without its
# extension serves as the person's label.
file_pathname = os.getcwd() + "/Aim"
for filename in os.listdir(file_pathname):
    index = filename.split('.')
    print(index[0])
    aim_index.append(index[0])
    img = cv2.imread(file_pathname + '/' + filename)
    img = cv2.resize(img, (64, 128))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; the model expects RGB
    img = Image.fromarray(img)
    img = img_to_tensor(img, img_transform)
    # Bug fix: guard the input transfer on GPU availability as well.
    if use_gpu:
        img = img.cuda()
    f1 = myexactor(img)
    # L2-normalize the 8 stripe vectors so Euclidean distances are comparable.
    a1 = normalize(pool2d(f1[0], type='max'))
    aim_list.append(a1)

# ---- Video loop: detect people with YOLOv8, match against the gallery --
video = cv2.VideoCapture("行人视频.mp4")
yolo = YOLO("yolov8s.pt")

PERSON_CLASS = 0  # COCO class id for "person" in the standard yolov8s.pt weights

while video.isOpened():
    # NOTE(review): read() is called twice, so only every other frame is
    # processed — presumably deliberate frame skipping for speed; confirm.
    success, frame = video.read()
    success, frame = video.read()
    if success:
        results = yolo(frame)  # YOLOv8 detections for this frame
        boxes = results[0].boxes
        for box in boxes:
            index = int(box.cls[0].cpu().numpy().round())
            # Bug fix: yolov8s.pt is COCO-trained (80 classes, person = 0);
            # the original test `index == 381` could never be true, so no
            # detection was ever matched.
            if index == PERSON_CLASS:
                xs, ys, xd, yd = box.xyxy[0].cpu().numpy().round()  # bounding box
                person = frame[int(ys):int(yd), int(xs):int(xd)]
                person = cv2.resize(person, (64, 128))
                person = cv2.cvtColor(person, cv2.COLOR_BGR2RGB)  # BGR -> RGB for the model
                person = Image.fromarray(person)
                person = img_to_tensor(person, img_transform)
                # Bug fix: only transfer to GPU when one is available.
                if use_gpu:
                    person = person.cuda()
                f2 = myexactor(person)
                a2 = normalize(pool2d(f2[0], type='max'))
                for p in range(len(aim_list)):
                    # Pairwise Euclidean distances between the 8 stripe
                    # features of gallery entry p and the detected person.
                    dist = np.zeros((8, 8))
                    for i in range(8):
                        for j in range(8):
                            dist[i][j] = euclidean(aim_list[p][i], a2[j])
                    # Aligned stripe-to-stripe distance: mean of the diagonal.
                    origin_dist = np.mean(np.diag(dist))
                    if origin_dist < 0.65:  # empirically chosen match threshold
                        cv2.rectangle(frame, (int(xs), int(ys)), (int(xd), int(yd)), (0, 0, 255), 2)
                        cv2.putText(frame, str(aim_index[p]), (int(xs), int(ys)), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                    (0, 0, 255))
                        break  # first matching gallery entry wins
        cv2.imshow("YOLOv8 Inference", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    else:
        break  # end of stream or read failure
video.release()
cv2.destroyAllWindows()
