import cv2
import numpy as np
import logging
from drawing_utils import draw_contours
from colors import COLOR_GREEN, COLOR_WHITE, COLOR_BLUE
import torch
import time
from PySide6.QtCore import QThread, Signal
from PySide6.QtGui import QImage, QPixmap


class MotionDetector(QThread):
    """Worker thread that tracks parking-spot occupancy in a video stream.

    Spot polygons are supplied as ``coordinates`` — a list of dicts with an
    ``"id"`` and a ``"coordinates"`` point list.  Vehicles are detected with
    a YOLOv5 model, each detection is matched to the nearest spot centre,
    and occupancy state, the occupied-spot count and occupation times are
    tracked.  Annotated and raw frames are emitted via Qt signals for a UI.
    """

    # Laplacian-variance threshold used by __apply to decide "spot is empty".
    LAPLACIAN = 1.4
    DETECT_DELAY = 1
    # A detection is assigned to a spot only if its centre lies within this
    # many pixels of the spot centre.
    MAX_MATCH_DISTANCE = 30
    # Frames for the UI: annotated copy and untouched copy respectively.
    send_img = Signal(np.ndarray)
    send_raw = Signal(np.ndarray)

    def __init__(self, video, coordinates):
        """Store inputs and load the vehicle-detection network.

        :param video: path/index accepted by ``cv2.VideoCapture``.
        :param coordinates: list of spot dicts (``"id"``, ``"coordinates"``).
        """
        # Bug fix: QThread.__init__ was never called, which breaks the
        # start()/signal machinery of the Qt base class.
        super().__init__()
        self.video = video
        self.coordinates_data = coordinates
        self.count = 0          # number of currently occupied spots
        self.contours = []      # spot polygons in absolute image coordinates
        self.bounds = []        # bounding rects (x, y, w, h), one per spot
        self.centers = []       # spot centres, one per spot
        self.mask = []          # boolean polygon masks inside each rect
        self.begin_time = []    # human-readable occupation start times
        # Load the vehicle-detection network (YOLOv5 small) from torch hub.
        self.model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

    def run(self):
        """Thread entry point: read frames and update spot occupancy."""
        capture = cv2.VideoCapture(self.video)

        coordinates_data = self.coordinates_data
        logging.debug("coordinates data: %s", coordinates_data)

        # Pre-compute per-spot geometry before touching any frame.
        self._prepare_spots(coordinates_data)

        self.statuses = [False] * len(coordinates_data)
        self.times = [0] * len(coordinates_data)
        self.begin_time = [0] * len(coordinates_data)

        try:
            while capture.isOpened():
                result, frame = capture.read()
                if frame is None:
                    # End of stream.
                    break
                if not result:
                    raise CaptureReadError("Error reading video capture on frame %s" % str(frame))
                self._process_frame(frame, coordinates_data)
        finally:
            # Bug fix: the capture handle was never released.
            capture.release()

    def _prepare_spots(self, coordinates_data):
        """Pre-compute contour, bounding rect, centre and mask per spot."""
        for p in coordinates_data:
            coordinates = self._coordinates(p)
            logging.debug("coordinates: %s", coordinates)

            # Bounding rectangle (x, y, w, h) of the spot polygon.
            rect = cv2.boundingRect(coordinates)
            center = np.array([rect[0] + rect[2] / 2, rect[1] + rect[3] / 2])
            logging.debug("rect: %s", rect)

            # Shift the polygon into the rect's local coordinate frame.
            new_coordinates = coordinates.copy()
            new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
            new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
            logging.debug("new_coordinates: %s", new_coordinates)

            self.contours.append(coordinates)
            self.bounds.append(rect)
            self.centers.append(center)

            # Filled polygon mask (True inside the spot), used by __apply.
            mask = cv2.drawContours(
                np.zeros((rect[3], rect[2]), dtype=np.uint8),
                [new_coordinates],
                contourIdx=-1,
                color=255,
                thickness=-1,
                lineType=cv2.LINE_8)
            self.mask.append(mask == 255)
            logging.debug("mask: %s", self.mask)

    def _process_frame(self, frame, coordinates_data):
        """Run detection on one frame and update all spot statuses."""
        raw = frame.copy()  # keep an unannotated copy for send_raw
        detection_result = self.model(frame)

        # Which spots received a detection in this frame.
        matched = [False] * len(self.centers)

        # NOTE(review): torch.hub YOLOv5 results expose per-image detections
        # as results.xyxy[0] (an n x 6 tensor: x1, y1, x2, y2, conf, cls).
        # The original indexed the results object directly — confirm against
        # the installed ultralytics/yolov5 version.
        for car in detection_result.xyxy[0]:
            car_center = np.array(
                [int((car[0] + car[2]) / 2), int((car[1] + car[3]) / 2)])

            # Find the nearest spot centre.  Bug fix: the original
            # initialised min_dis to 0, so "dis < min_dis" never held and
            # no spot was ever marked occupied.
            car_index = -1
            min_dis = float("inf")
            for index, center in enumerate(self.centers):
                dis = np.sqrt(np.sum(np.square(car_center - center)))
                if dis < min_dis:
                    min_dis = dis
                    car_index = index

            if car_index < 0 or min_dis >= self.MAX_MATCH_DISTANCE:
                continue
            matched[car_index] = True

            if not self.statuses[car_index]:
                # Spot transitions to occupied: count it once (the original
                # incremented on every frame) and start its timer.
                self.statuses[car_index] = True
                self.count += 1
                self.times[car_index] = time.time()
                self.begin_time[car_index] = time.strftime(
                    "%Y-%m-%d %H:%M:%S", time.localtime(self.times[car_index]))

            # Recolour the spot to show it is occupied.  Bug fix: the
            # original labelled it with the stale setup-loop variable "p".
            color = COLOR_GREEN if self.statuses[car_index] else COLOR_BLUE
            spot = coordinates_data[car_index]
            coordinates = self._coordinates(spot)
            draw_contours(frame, coordinates, str(spot["id"] + 1),
                          COLOR_WHITE, color)

        # Spots that were occupied but got no detection this frame have been
        # vacated: free them and store the occupation duration in hours.
        for index, occupied in enumerate(self.statuses):
            if occupied and not matched[index]:
                self.statuses[index] = False
                self.count -= 1
                self.times[index] = (time.time() - self.times[index]) / 3600

        # Bug fix: the annotated frame previously went out on send_raw and
        # the untouched copy on send_img — confirm the UI expects this order.
        self.send_raw.emit(raw)
        self.send_img.emit(frame)

    def __apply(self, grayed, index, p):
        """Return True when spot *index* looks empty in the grayscale frame.

        Computes the Laplacian over the spot's bounding rect and compares
        the mean absolute response inside the polygon mask against the
        LAPLACIAN threshold (low texture => empty spot).
        """
        coordinates = self._coordinates(p)
        logging.debug("points: %s", coordinates)

        rect = self.bounds[index]
        logging.debug("rect: %s", rect)

        # Crop the spot's bounding rect out of the grayscale frame.
        roi_gray = grayed[rect[1]:(rect[1] + rect[3]), rect[0]:(rect[0] + rect[2])]
        laplacian = cv2.Laplacian(roi_gray, cv2.CV_64F)
        logging.debug("laplacian: %s", laplacian)

        status = np.mean(np.abs(laplacian * self.mask[index])) < MotionDetector.LAPLACIAN
        logging.debug("status: %s", status)

        return status

    @staticmethod
    def _coordinates(p):
        """Return the spot's polygon points as a numpy array."""
        return np.array(p["coordinates"])

    @staticmethod
    def same_status(coordinates_status, index, status):
        """True if *status* equals the recorded status at *index*."""
        return status == coordinates_status[index]

    @staticmethod
    def status_changed(coordinates_status, index, status):
        """True if *status* differs from the recorded status at *index*."""
        return status != coordinates_status[index]


class CaptureReadError(Exception):
    """Raised when ``cv2.VideoCapture.read()`` reports a failed frame read."""
    pass
