import json
import random

import networkx as nx
import os
import time

import cv2
import numpy as np
import torch
from PyQt5.QtCore import QThread, pyqtSignal, QTimer, QMutex, QObject, QSize
from PyQt5.QtDataVisualization import QAbstract3DSeries, QAbstract3DGraph, Q3DCamera, QScatterDataProxy, Q3DTheme, \
    QScatter3DSeries, QScatterDataItem, Q3DScatter
from PyQt5.QtGui import QImage, QPixmap, QFont, QVector3D, QColor
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QMenu, QAction, QComboBox, QWidget, QSizePolicy
from torch.backends import cudnn

from main_win.newwindow import Ui_MainWindow
import sys

from models.common import DetectMultiBackend
from utils.dataloaders import LoadWebcam, LoadImages
from utils.general import check_img_size, check_imshow, non_max_suppression, scale_coords
from utils.torch_utils import select_device
from utils.plots import colors, plot_one_box

from mathfunc import camera_calibration as cc

# Global frame counter shared by all DetThread workers; used as a soft
# barrier so the four camera threads emit their results in lock-step.
total_count = 0
total_count_lock = QMutex()  # guards total_count across DetThread instances
thread_num = 4  # number of camera / detector threads


class ScatterDataModifier(QObject):
    """Drives a Q3DScatter graph for target-trajectory display.

    Series 0 holds a static "warning plane" of points (see :meth:`setWarn`);
    every further series is the wake trail of one tracked target, capped at
    ``wake_particle`` points so old positions fade out.
    """

    # Leftovers from the Qt scatter example toggled by toggleItemCount();
    # they do not affect the tracking display itself.
    numberOfItems = 3600
    curveDivider = 3.0
    lowerNumberOfItems = 900
    lowerCurveDivider = 0.75

    backgroundEnabledChanged = pyqtSignal(bool)
    gridEnabledChanged = pyqtSignal(bool)
    shadowQualityChanged = pyqtSignal(int)
    fontChanged = pyqtSignal(QFont)

    def __init__(self, scatter):
        """Configure theme, font, camera preset and axes of *scatter*.

        Args:
            scatter: a Q3DScatter instance owned by the main window.
        """
        super(ScatterDataModifier, self).__init__()

        self.m_graph = scatter  # Q3DScatter instance
        self.m_fontSize = 20.0
        self.m_style = QAbstract3DSeries.MeshSphere
        self.m_smooth = True
        self.m_itemCount = self.lowerNumberOfItems
        self.m_curveDivider = self.lowerCurveDivider

        # Number of wake (trail) particles kept per target series.
        self.wake_particle = 50

        # Semi-transparent base colors, cycled per target series.
        self.colors = [QColor(255, 0, 0, 10), QColor(0, 130, 252, 10), QColor(253, 216, 69, 10),
                       QColor(34, 237, 124, 10), QColor(9, 176, 211, 10), QColor(29, 39, 201, 10),
                       QColor(249, 226, 100, 10)]

        # Set the active theme and enlarge its font.
        self.m_graph.activeTheme().setType(Q3DTheme.ThemeEbony)
        font = self.m_graph.activeTheme().font()
        font.setPointSize(int(self.m_fontSize))
        self.m_graph.activeTheme().setFont(font)
        # Front-facing camera preset (shadow quality left at default).
        self.m_graph.scene().activeCamera().setCameraPreset(
            Q3DCamera.CameraPresetFront)

        self.m_graph.setAspectRatio(1)

        # Default world-space ranges; overwritten via setAxisRange() once a
        # camera configuration file is loaded.
        self.m_graph.axisX().setRange(-240.00, -220.00)
        self.m_graph.axisY().setRange(-10.00, 10.00)
        self.m_graph.axisZ().setRange(0, 30.00)

        self.iniData()

    def setAxisRange(self, x_range, y_range, z_range):
        """Set axis ranges from world-coordinate ranges.

        The graph's Y axis displays height, so the caller's z_range maps to
        axisY and y_range to axisZ — matching the coordinate swap done in
        :meth:`addData`.
        """
        self.m_graph.axisX().setRange(*x_range)
        self.m_graph.axisY().setRange(*z_range)
        self.m_graph.axisZ().setRange(*y_range)

    def iniData(self):
        """Label the axes and create series 0, reserved for the warning plane."""
        self.m_graph.axisX().setTitle("X")
        self.m_graph.axisX().setTitleVisible(True)
        self.m_graph.axisY().setTitle("Height")
        self.m_graph.axisY().setTitleVisible(True)
        self.m_graph.axisZ().setTitle("Y")
        self.m_graph.axisZ().setTitleVisible(True)

        self.m_graph.axisY().setLabelAutoRotation(90)

        # Warning-plane series (index 0).
        proxy = QScatterDataProxy()
        series = QScatter3DSeries(proxy)
        series.setItemLabelFormat(
            "@xTitle: @xLabel @yTitle: @yLabel @zTitle: @zLabel")
        series.setMeshSmooth(self.m_smooth)
        series.setItemSize(0.005)
        series.setBaseColor(self.colors[0])
        self.m_graph.addSeries(series)

    def setWarn(self, x_warn, y_range, z_range):
        """Fill series 0 with a plane of points at x == x_warn.

        The plane covers y_range × z_range, sampled at ``lidu`` points per
        world unit.
        """
        self.m_graph.seriesList()[0].dataProxy().removeItems(0, -1)
        dataArray = []
        lidu = 2  # sampling density (points per world unit)
        y_range = [int(lidu * x) for x in y_range]
        z_range = [int(lidu * x) for x in z_range]
        for j in range(*y_range):
            for k in range(*z_range):
                itm = QScatterDataItem(
                    QVector3D(x_warn,
                              k / lidu,
                              j / lidu))
                dataArray.append(itm)

        self.m_graph.seriesList()[0].dataProxy().addItems(dataArray)

    def addData(self, data, series_index):
        """Append one point to the wake trail of target *series_index*.

        Args:
            data: [x, y, z] world coordinates (z is height and is plotted on
                the graph's Y axis).
            series_index: 0-based target index; its series is created on
                first use and the trail is trimmed to ``wake_particle``
                points.
        """
        dataArray = [QScatterDataItem(
            QVector3D(data[0],
                      data[2],   # height on the graph's Y axis
                      data[1]))]

        series_len = len(self.m_graph.seriesList())
        series_index += 1  # series 0 is reserved for the warning plane
        if series_index >= series_len:
            # NOTE(review): only one series is created per call; assumes
            # target indices grow one at a time — confirm with callers.
            proxy = QScatterDataProxy()
            series = QScatter3DSeries(proxy)
            series.setItemLabelFormat(
                "@xTitle: @xLabel @yTitle: @yLabel @zTitle: @zLabel")
            series.setMeshSmooth(self.m_smooth)
            series.setItemSize(0.02)
            series.setBaseColor(self.colors[series_index % len(self.colors)])
            self.m_graph.addSeries(series)

        self.m_graph.seriesList()[series_index].dataProxy().addItems(dataArray)
        count = self.m_graph.seriesList()[series_index].dataProxy().itemCount()
        if count > self.wake_particle:
            self.m_graph.seriesList()[series_index].dataProxy().removeItems(0, count - self.wake_particle)

    def clearData(self):
        """Remove every target series, keeping series 0 (the warning plane).

        Iterates over a snapshot of the series list: removing from the live
        list while iterating it (as the original code did) skips every other
        series.
        """
        for series in list(self.m_graph.seriesList())[1:]:
            self.m_graph.removeSeries(series)

    def changeStyle(self, style):
        """Slot for a QComboBox: switch the mesh style of series 0."""
        comboBox = self.sender()
        if isinstance(comboBox, QComboBox):
            self.m_style = QAbstract3DSeries.Mesh(comboBox.itemData(style))
            self.m_graph.seriesList()[0].setMesh(self.m_style)

    def setSmoothDots(self, smooth):
        """Enable/disable mesh smoothing on series 0."""
        self.m_smooth = bool(smooth)
        self.m_graph.seriesList()[0].setMeshSmooth(self.m_smooth)

    def changeTheme(self, theme):
        """Switch the active theme and broadcast its derived properties."""
        currentTheme = self.m_graph.activeTheme()
        currentTheme.setType(Q3DTheme.Theme(theme))
        self.backgroundEnabledChanged.emit(currentTheme.isBackgroundEnabled())
        self.gridEnabledChanged.emit(currentTheme.isGridEnabled())
        self.fontChanged.emit(currentTheme.font())

    # Next camera preset cycled through by changePresetCamera().
    preset = int(Q3DCamera.CameraPresetFrontLow)

    def changePresetCamera(self):
        """Advance to the next camera preset, wrapping after the last one."""
        self.m_graph.scene().activeCamera().setCameraPreset(
            Q3DCamera.CameraPreset(self.preset))

        self.preset += 1

        if self.preset > Q3DCamera.CameraPresetDirectlyBelow:
            self.preset = int(Q3DCamera.CameraPresetFrontLow)

    def changeLabelStyle(self):
        """Toggle the label background of the active theme."""
        self.m_graph.activeTheme().setLabelBackgroundEnabled(
            not self.m_graph.activeTheme().isLabelBackgroundEnabled())

    def changeFont(self, font):
        """Apply *font* (at the configured point size) to the active theme."""
        newFont = QFont(font)
        newFont.setPointSizeF(self.m_fontSize)
        self.m_graph.activeTheme().setFont(newFont)

    def shadowQualityUpdatedByVisual(self, sq):
        """Re-emit a shadow-quality change coming from the graph itself."""
        self.shadowQualityChanged.emit(int(sq))

    def changeShadowQuality(self, quality):
        """Set the graph's shadow quality from an int enum value."""
        sq = QAbstract3DGraph.ShadowQuality(quality)
        self.m_graph.setShadowQuality(sq)

    def setBackgroundEnabled(self, enabled):
        """Show/hide the graph background."""
        self.m_graph.activeTheme().setBackgroundEnabled(enabled)

    def setGridEnabled(self, enabled):
        """Show/hide the graph grid."""
        self.m_graph.activeTheme().setGridEnabled(enabled)

    def toggleItemCount(self):
        """Toggle between the high and low item-count presets and clear series 0.

        Fix: the original ended with ``self.addData()`` — a call with no
        arguments although addData() requires (data, series_index), so it
        always raised TypeError. The broken call has been removed.
        """
        if self.m_itemCount == self.numberOfItems:
            self.m_itemCount = self.lowerNumberOfItems
            self.m_curveDivider = self.lowerCurveDivider
        else:
            self.m_itemCount = self.numberOfItems
            self.m_curveDivider = self.curveDivider

        self.m_graph.seriesList()[0].dataProxy().resetArray(None)


class PltThread(QThread):
    """Fuses per-camera 2-D detections into 3-D positions and plots them.

    Detections arrive through the ``send_pos`` signal as
    ``[camera_index, boxes]``. Once all four cameras have reported a frame,
    detections are matched pairwise via the stereo calibration objects in
    ``cc_matrix``, triangulated, classified into per-target tracks, and
    forwarded to the scatter view. ``send_warn`` fires when a position
    crosses the warning plane.
    """
    send_pos = pyqtSignal(list)    # [camera_index, list_of_boxes]
    send_thred = pyqtSignal(float)
    send_warn = pyqtSignal(list)   # [x, y, z, frame_index]

    def __init__(self, scatter_modifier):
        super(PltThread, self).__init__()
        self.scatter_modifier = scatter_modifier
        self.pos_data = [[], [], [], []]  # per-camera history of box centers
        self.send_pos.connect(lambda x: self.update_pos(x))
        self.error_thred = 0.1            # max stereo error for a pair match
        self.camera_position = []
        self.camera_rotation = []
        self.cc_matrix = []               # StereoVision object per camera pair
        self.jk = 0                       # fused-frame counter
        self.warn = 0                     # warning-plane x coordinate
        # State for automatic target classification (see point_class_pos).
        self.target_class = []

    def update_pos(self, x):
        """Record one camera's detections; triangulate when all cameras reported.

        Args:
            x: ``[camera_index, boxes]`` where each box is
               ``[x1, y1, x2, y2, ...]``; only the box center is kept.
        """
        self.pos_data[x[0]].append([[(xx[0] + xx[2]) / 2, (xx[1] + xx[3]) / 2] for xx in x[1]])

        if len(self.pos_data[0]) == len(self.pos_data[1]) and len(self.pos_data[0]) == len(self.pos_data[2]) and \
                len(self.pos_data[0]) == len(self.pos_data[3]) and len(self.pos_data[0]) > 0:
            data = [self.pos_data[0][-1], self.pos_data[1][-1], self.pos_data[2][-1], self.pos_data[3][-1]]

            self.jk += 1

            # Pairwise matching: detections from two different cameras are
            # the same physical target when their stereo error is small.
            links = []
            for i in range(len(data) - 1):  # first camera of the pair
                for j in range(i + 1, len(data)):  # second camera of the pair
                    targetsi = data[i]  # all targets of camera i+1
                    targetsj = data[j]  # all targets of camera j+1
                    for indexti, ti in enumerate(targetsi):
                        for indextj, tj in enumerate(targetsj):
                            # Matches below the threshold count as one target.
                            error = self.cc_matrix[i][j].get_error_by_uv(ti, tj)
                            if error < self.error_thred:
                                links.append([str(i) + str(indexti), str(j) + str(indextj), error])

            # Each connected component of the match graph is one target.
            G = nx.Graph()
            for link in links:
                u, v, d = link
                G.add_edge(u, v, weight=d)

            for target in nx.connected_components(G):
                g = G.subgraph(target)
                edges = sorted(g.edges(data=True), key=lambda t: t[2].get('weight', 1))

                # Prefer the lowest-error edge; its endpoints must come from
                # two different cameras (node id = camera digit + target digit).
                c1, c2 = edges[0][0], edges[0][1]
                while c1[0] == c2[0]:
                    # random.sample needs a sequence — sampling directly from
                    # the set `target` raises TypeError on Python 3.11+.
                    c1, c2 = random.sample(list(target), 2)

                pos = (self.cc_matrix[int(c1[0])][int(c2[0])]).get_position_by_uv(data[int(c1[0])][int(c1[1])],
                                                                                  data[int(c2[0])][int(c2[1])])

                if pos[0] < self.warn:
                    self.send_warn.emit([*pos, len(self.pos_data[0])])
                self.point_class_pos(pos)
                print(total_count / 4, *pos)

    def point_class_pos(self, pos):
        """Assign *pos* to an existing target class or start a new one.

        Each entry of ``target_class`` is ``[staleness, color, pos0(, pos1)]``:
        a class with one stored position matches on plain distance; a class
        with two stored positions matches on consistent step length.

        Fixes over the original:
          * the two length branches are now mutually exclusive (``elif``) —
            previously a match in the len==3 branch grew the entry to 4 and
            immediately re-triggered the len==4 branch, adding the point twice;
          * stale classes are pruned after the loop instead of ``del`` inside
            it, which skipped the following entry.
        """
        thred_distance = 3
        if not self.target_class:  # first position ever seen
            self.target_class.append([0, 0, pos])
            self.scatter_modifier.addData(pos, 0)
            return

        flag = True  # whether a brand-new class is needed
        for tclass in self.target_class:
            tclass[0] += 1  # age every class; reset to 0 on a match
            if len(tclass) == 3:
                if self.get_distance_between_points(pos, tclass[2]) < thred_distance:
                    tclass.append(pos)
                    self.scatter_modifier.addData(pos, tclass[1])
                    tclass[0] = 0
                    flag = False
            elif len(tclass) == 4:
                # Match when the new step length is close to the previous one.
                if abs(self.get_distance_between_points(pos, tclass[3]) - self.get_distance_between_points(
                        tclass[2], tclass[3])) < thred_distance:
                    tclass.append(pos)
                    del tclass[2]  # keep only the last two positions
                    tclass[0] = 0
                    self.scatter_modifier.addData(pos, tclass[1])
                    flag = False

        # Drop classes not matched for more than 100 updates.
        self.target_class = [t for t in self.target_class if t[0] <= 100]

        if flag:
            self.target_class.append([0, len(self.target_class), pos])
            self.scatter_modifier.addData(pos, self.target_class[-1][1])

    @staticmethod
    def get_distance_between_points(pos1, pos2):
        """Euclidean distance between two 3-D points."""
        return np.sqrt((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2 + (pos1[2] - pos2[2]) ** 2)


class DetThread(QThread):
    """YOLOv5 detection worker for one camera / video source.

    Loads the model, reads frames, runs inference + NMS, draws boxes and
    emits annotated frames, per-class statistics, FPS, progress and raw
    detections. Four instances run in lock-step via the module-level
    ``total_count`` barrier.
    """
    send_img = pyqtSignal(np.ndarray)   # annotated frame
    send_raw = pyqtSignal(np.ndarray)   # raw frame
    send_statistic = pyqtSignal(dict)   # per-class detection counts
    # status messages: detecting / paused / stopped / finished / errors
    send_msg = pyqtSignal(str)
    send_percent = pyqtSignal(int)      # progress-bar position
    send_fps = pyqtSignal(str)
    # detected boxes of the current frame
    send_pos = pyqtSignal(list)

    def __init__(self, index):
        super(DetThread, self).__init__()
        self.index = index                    # camera index (0-based)
        self.weights = './yolov5s.pt'         # requested weights
        self.current_weight = './yolov5s.pt'  # weights currently loaded
        self.source = '0'                     # video source
        self.conf_thres = 0.25                # confidence threshold
        self.iou_thres = 0.45                 # NMS IoU threshold
        self.jump_out = False                 # request to leave the loop
        self.is_continue = True               # run / pause flag
        self.percent_length = 1000            # progress-bar full-scale value
        self.rate_check = True                # enable frame-rate throttling
        self.rate = 100                       # throttle frequency (Hz)
        self.save_fold = './result'           # output folder for recordings

    @torch.no_grad()
    def run(self,
            imgsz=(640, 640),  # inference size (height, width)
            max_det=1000,  # maximum detections per image
            device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
            view_img=False,  # show results
            save_txt=False,  # save results to *.txt
            save_conf=False,  # save confidences in --save-txt labels
            save_crop=False,  # save cropped prediction boxes
            nosave=True,  # do not save images/videos
            classes=None,  # filter by class: --class 0, or --class 0 2 3
            agnostic_nms=False,  # class-agnostic NMS
            augment=False,  # augmented inference
            visualize=False,  # visualize features
            update=False,  # update all models
            project='runs/detect',  # save results to project/name
            name='exp',  # save results to project/name
            exist_ok=False,  # existing project/name ok, do not increment
            line_thickness=3,  # bounding box thickness (pixels)
            hide_labels=False,  # hide labels
            hide_conf=False,  # hide confidences
            half=False,  # use FP16 half-precision inference
            dnn=False,  # use OpenCV DNN for ONNX inference
            ):
        """Main detection loop, executed in this QThread.

        Runs until ``jump_out`` is set or the source is exhausted; honors
        ``is_continue`` for pause and hot-swaps weights when ``weights``
        differs from ``current_weight``.
        """
        # Initialize device once (the original called select_device twice).
        device = select_device(device)
        half &= device.type != 'cpu'  # half precision only supported on CUDA

        # Load model
        model = DetectMultiBackend(self.weights, device=device, dnn=dnn, fp16=half)
        stride, names, pt = model.stride, model.names, model.pt
        imgsz = check_img_size(imgsz, s=stride)  # check image size

        # Dataloader: webcams / streams vs. files
        if self.source.isnumeric() or self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')):
            view_img = check_imshow()
            cudnn.benchmark = True  # speeds up constant-image-size inference
            dataset = LoadWebcam(self.source, img_size=imgsz, stride=stride)
            bs = len(dataset)  # batch_size
        else:
            dataset = LoadImages(self.source, img_size=imgsz, stride=stride)
            bs = 1

        # Run inference
        model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup
        count = 0
        start_time = time.time()
        dataset = iter(dataset)

        while True:

            # Manual stop requested from the UI.
            if self.jump_out:
                # vid_cap only exists after the first frame was read and is
                # None for image sources — release defensively (the original
                # raised AttributeError when stopped before the first frame).
                vid_cap = getattr(self, 'vid_cap', None)
                if vid_cap:
                    vid_cap.release()
                self.send_percent.emit(0)
                self.send_msg.emit('停止')
                if hasattr(self, 'out'):
                    self.out.release()
                break

            # Hot-swap the model when new weights were requested.
            if self.current_weight != self.weights:
                model = DetectMultiBackend(self.weights, device=device, dnn=dnn, fp16=half)
                stride, names, pt = model.stride, model.names, model.pt
                imgsz = check_img_size(imgsz, s=stride)  # check image size
                model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup
                self.current_weight = self.weights

            # Pause switch: when paused, just spin on the flag.
            if self.is_continue:
                path, img, im0s, self.vid_cap, _ = next(dataset)

                count += 1
                # Refresh the reported FPS every 30 frames.
                if count % 30 == 0 and count >= 30:
                    fps = int(30 / (time.time() - start_time))
                    self.send_fps.emit('fps：' + str(fps))
                    start_time = time.time()
                if self.vid_cap:
                    percent = int(count / self.vid_cap.get(cv2.CAP_PROP_FRAME_COUNT) * self.percent_length)
                    self.send_percent.emit(percent)
                else:
                    percent = self.percent_length

                statistic_dic = {name: 0 for name in names}
                img = torch.from_numpy(img).to(device)
                img = img.half() if half else img.float()  # uint8 to fp16/32
                img /= 255.0  # 0 - 255 to 0.0 - 1.0
                if img.ndimension() == 3:
                    img = img.unsqueeze(0)

                pred = model(img, augment=augment, visualize=visualize)

                # Apply NMS
                pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes, agnostic_nms,
                                           max_det=max_det)

                # Process detections
                pos_list = []
                for i, det in enumerate(pred):  # detections per image
                    im0 = im0s.copy()

                    if len(det):
                        # Rescale boxes from img_size to im0 size
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                        pos_list = det.data.cpu().numpy().tolist()

                        # Draw results
                        for *xyxy, conf, cls in reversed(det):
                            c = int(cls)  # integer class
                            statistic_dic[names[c]] += 1
                            label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                            plot_one_box(xyxy, im0, label=label, color=colors(c, True),
                                         line_thickness=line_thickness)

                # Throttle how often frames are emitted to the UI.
                if self.rate_check:
                    time.sleep(1 / self.rate)

                # Frame barrier: wait until every detector thread finished
                # this frame. NOTE(review): busy-wait — if one thread stops
                # mid-frame the others may spin here; confirm shutdown order.
                global total_count
                total_count_lock.lock()
                total_count += 1
                total_count_lock.unlock()
                while total_count % thread_num != 0:
                    time.sleep(0.001)

                self.send_msg.emit('相机' + str(self.index) + '第' + str(count) + '帧：' + str(pos_list))
                self.send_pos.emit(pos_list)
                self.send_img.emit(im0)
                self.send_raw.emit(im0s if isinstance(im0s, np.ndarray) else im0s[0])
                self.send_statistic.emit(statistic_dic)

                # Automatic recording, when enabled.
                if self.save_fold and (not nosave):
                    os.makedirs(self.save_fold, exist_ok=True)  # create folder on demand
                    if self.vid_cap is None:
                        # Image input: save a single annotated JPEG.
                        save_path = os.path.join(self.save_fold,
                                                 time.strftime('%Y_%m_%d_%H_%M_%S',
                                                               time.localtime()) + '.jpg')
                        cv2.imwrite(save_path, im0)
                    else:
                        if count == 1:  # initialize the recorder on the first frame
                            # Record at the source's native frame rate.
                            ori_fps = int(self.vid_cap.get(cv2.CAP_PROP_FPS))
                            if ori_fps == 0:
                                ori_fps = 25
                            width, height = im0.shape[1], im0.shape[0]
                            save_path = os.path.join(self.save_fold,
                                                     time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime()) + '.mp4')
                            self.out = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), ori_fps,
                                                       (width, height))
                        self.out.write(im0)

                if percent == self.percent_length:
                    print(count)
                    self.send_percent.emit(0)
                    self.send_msg.emit('检测结束')
                    if hasattr(self, 'out'):
                        self.out.release()
                    # Normal end of the source.
                    break


class MainWindow(QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):
        """Build the UI, the 3-D trajectory view and the four detector threads."""
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)

        # Trajectory data (one list of positions per camera).
        self.pos_data = [[], [], [], []]

        # Trajectory screen: the 3-D scatter container shares the spot of
        # the main screen (screen0); only one of the two is visible.
        self.screen0.hide()
        self.graph = Q3DScatter()
        self.container = QWidget.createWindowContainer(self.graph)
        self.verticalLayout_10.addWidget(self.container)
        modifier = ScatterDataModifier(self.graph)

        # Start the trajectory-plotting thread.
        self.plt_thread = PltThread(modifier)
        self.plt_thread.start()

        self.m_flag = False
        # Auto-discover model weights in ./pt, smallest file first,
        # refreshed every 2 s by a timer.
        self.modelComboBox.clear()
        self.pt_list = os.listdir('./pt')
        self.pt_list = [file for file in self.pt_list if file.endswith('.pt')]
        self.pt_list.sort(key=lambda x: os.path.getsize('./pt/' + x))
        self.modelComboBox.clear()
        self.modelComboBox.addItems(self.pt_list)
        self.qtimer_search = QTimer(self)
        self.qtimer_search.timeout.connect(lambda: self.search_pt())
        self.qtimer_search.start(2000)

        # Manual model selection via file dialog.
        self.modelPushButton.clicked.connect(lambda: self.select_model())

        # Currently selected model.
        self.model_type = self.modelComboBox.currentText()

        # yolov5 detector threads, one per camera.
        self.screens = [self.screen0, self.screen1, self.screen2, self.screen3, self.screen4]  # first element is the main screen
        self.thread_pool = []
        self.fpsLabels = [self.fpsLabel1, self.fpsLabel2, self.fpsLabel3, self.fpsLabel4]
        for index in range(thread_num):
            thread = DetThread(index)
            self.thread_pool.append(thread)
        self.det_thread1 = self.thread_pool[0]
        self.det_thread2 = self.thread_pool[1]
        self.det_thread3 = self.thread_pool[2]
        self.det_thread4 = self.thread_pool[3]

        self.thread_setting()

        # Clicking a camera screen mirrors its stream onto the main screen.
        self.screen1.connect_customized_slot(self.s1_connect_s0)
        self.screen2.connect_customized_slot(self.s2_connect_s0)
        self.screen3.connect_customized_slot(self.s3_connect_s0)
        self.screen4.connect_customized_slot(self.s4_connect_s0)

        ##################
        self.videoRadioButton.clicked.connect(self.re_connect_event_by_video)
        self.imgRadioButton.clicked.connect(self.re_connect_event_by_video)
        self.streamRadioButton.clicked.connect(self.re_connect_event_by_stream)

        self.runCheckBox.clicked.connect(self.run_or_continue)
        self.endCheckBox.clicked.connect(self.stop)

        # Detection parameters (spin boxes kept in sync with sliders).
        self.modelComboBox.currentTextChanged.connect(self.change_model)
        self.believeDoubleSpinBox.valueChanged.connect(lambda x: self.change_val(x, 'believeDoubleSpinBox'))
        self.believeHorizontalSlider.valueChanged.connect(lambda x: self.change_val(x, 'believeHorizontalSlider'))
        self.iouDoubleSpinBox.valueChanged.connect(lambda x: self.change_val(x, 'iouDoubleSpinBox'))
        self.iouHorizontalSlider.valueChanged.connect(lambda x: self.change_val(x, 'iouHorizontalSlider'))
        self.delaySpinBox.valueChanged.connect(lambda x: self.change_val(x, 'delaySpinBox'))
        self.delayHorizontalSlider.valueChanged.connect(lambda x: self.change_val(x, 'delayHorizontalSlider'))

        self.thredDoubleSpinBox.valueChanged.connect(lambda x: self.change_val(x, 'thredDoubleSpinBox'))
        # self.thredHorizontalSlider.valueChanged.connect(lambda x: self.change_val(x, 'thredHorizontalSlider'))
        self.wakeSpinBox.valueChanged.connect(lambda x: self.change_val(x, 'wake'))
        self.clearPltPushButton.clicked.connect(self.clear_plt_point)

        # Intrusion alerts from the plotting thread.
        self.plt_thread.send_warn.connect(lambda x: self.show_warn(x))

        # Trajectory (camera calibration) configuration file.
        self.cameraConfigFilepushButton.clicked.connect(self.open_camera_config_file)
        self.showScatterPushButton.clicked.connect(self.show_scatter)

        self.delayCheckBox.clicked.connect(self.checkrate)
        self.load_setting()

        # Slot that feeds the main screen; f0_cnum tracks which detector
        # thread (1-4) is currently connected to it (0 = none).
        self.f0 = lambda x: self.show_image(x, self.screen0)
        self.f0_cnum = 0

    def show_warn(self, x):
        """Append an intrusion alert (frame number + xyz position) to the warning browser."""
        frame_no, coords = x[-1], x[0:3]
        self.warnTextBrowser.append("<h3>第" + str(frame_no) + "帧,发现入侵目标坐标:" + str(coords) + "</h3>")

    def clear_plt_point(self):
        """Reset the trajectory view: drop target classes, plotted series and buffered positions."""
        plt = self.plt_thread
        plt.target_class = []
        plt.scatter_modifier.clearData()
        plt.pos_data = [[], [], [], []]

    def open_camera_config_file(self):
        """Ask the user for a camera-configuration JSON file and load it.

        The settings are only (re)loaded when a file was actually chosen;
        the original reloaded unconditionally, which crashed on a cancelled
        dialog while the combo box was still empty.
        """
        name, _ = QFileDialog.getOpenFileName(self, '选取相机配置文件', '.', "Json File(*.json)")
        if name:
            self.cameraConfigFileComboBox.clear()
            self.cameraConfigFileComboBox.addItem(name)
            self.load_camera_setting()

    def load_camera_setting(self):
        """Load the camera JSON config selected in the combo box into the plot thread.

        Reads the stereo-matching threshold, camera poses, lens/sensor
        parameters and axis ranges, then builds a StereoVision calibration
        object for every ordered camera pair (``cc_matrix[i][j]``); the
        diagonal holds 0 placeholders.
        """
        file_name = self.cameraConfigFileComboBox.currentText()
        # Context manager closes the handle; json.load(open(...)) leaked it.
        with open(file_name, 'r', encoding='utf-8') as fp:
            config = json.load(fp)
        self.plt_thread.error_thred = config['error_thred']
        self.thredDoubleSpinBox.setValue(self.plt_thread.error_thred)
        self.plt_thread.camera_position = [config['camera%d_position' % k]
                                           for k in range(1, thread_num + 1)]
        self.plt_thread.camera_rotation = [config['camera%d_rotation' % k]
                                           for k in range(1, thread_num + 1)]

        f = config['f']              # focal length
        cmos_x = config['cmos_x']    # sensor width
        cmos_y = config['cmos_y']    # sensor height
        frame_w = config['frame_w']  # frame width in pixels
        frame_h = config['frame_h']  # frame height in pixels
        warn = config['warn']        # warning-plane x coordinate

        self.plt_thread.warn = warn
        self.plt_thread.scatter_modifier.setWarn(warn, config['y_range'], config['z_range'])

        self.plt_thread.scatter_modifier.setAxisRange(config['x_range'], config['y_range'], config['z_range'])
        self.plt_thread.cc_matrix = []
        for i in range(thread_num):
            self.plt_thread.cc_matrix.append([])
            for j in range(thread_num):
                if i == j:
                    # No stereo pair with itself; 0 is a placeholder.
                    self.plt_thread.cc_matrix[i].append(0)
                else:
                    self.plt_thread.cc_matrix[i].append(cc.StereoVision(
                        frame_w=frame_w, frame_h=frame_h,
                        mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=f, dx=cmos_x / frame_w, dy=cmos_y / frame_h,
                        # camera poses in the world coordinate frame
                        camera_xy_1=self.plt_thread.camera_position[i],
                        camera_xy_2=self.plt_thread.camera_position[j],
                        camera_degree_1=self.plt_thread.camera_rotation[i],
                        camera_degree_2=self.plt_thread.camera_rotation[j]))

    def thread_setting(self):
        """Wire each detection thread's weights, source and signals to the UI.

        Replaces four copy-pasted configuration stanzas with one loop. Each
        lambda binds the loop index through a default argument (``i=index``)
        so the late-binding-closure pitfall does not make every slot use the
        last index.
        """
        weights = "./pt/%s" % self.model_type  # selected weight file
        for index, thread in enumerate(self.thread_pool):
            thread.weights = weights
            thread.source = '0'  # default to the local webcam; not persisted
            thread.send_img.connect(
                lambda x, i=index: self.show_image(x, self.screens[i + 1]))
            thread.send_msg.connect(lambda x, i=index: self.show_msg(x, i))
            thread.send_fps.connect(lambda x, i=index: self.fpsLabels[i].setText(x))
            thread.send_pos.connect(
                lambda x, i=index: self.plt_thread.send_pos.emit([i, x]))

    def show_msg(self, msg, index):
        """Append *msg* to the log pane belonging to camera *index* (0-3)."""
        panes = {
            0: self.plainTextEdit,
            1: self.plainTextEdit_2,
            2: self.plainTextEdit_3,
            3: self.plainTextEdit_4,
        }
        pane = panes.get(index)
        if pane is not None:
            pane.appendPlainText(msg)

    def select_model(self):
        """Let the user pick a .pt weight file and select it in the model combo box."""
        chosen, _ = QFileDialog.getOpenFileName(self, '选取视频或图片', "./pt", "Weight File(*.pt)")
        self.modelComboBox.setCurrentText(os.path.basename(chosen))

    def show_scatter(self):
        """Swap the flat screen-0 preview out for the 3D scatter container."""
        self.screen0.hide()
        self.container.show()

    def s1_connect_s0(self):
        """Show screen0 and feed it detector 1's frames, detaching the
        detector (2/3/4) that currently feeds it, if any."""
        self.container.hide()
        self.screen0.show()
        current = {2: self.det_thread2,
                   3: self.det_thread3,
                   4: self.det_thread4}.get(self.f0_cnum)
        if current is not None:
            current.send_img.disconnect(self.f0)
        self.det_thread1.send_img.connect(self.f0)
        self.f0_cnum = 1

    def s2_connect_s0(self):
        """Show screen0 and feed it detector 2's frames, detaching the
        detector (1/3/4) that currently feeds it, if any."""
        self.container.hide()
        self.screen0.show()
        current = {1: self.det_thread1,
                   3: self.det_thread3,
                   4: self.det_thread4}.get(self.f0_cnum)
        if current is not None:
            current.send_img.disconnect(self.f0)
        self.det_thread2.send_img.connect(self.f0)
        self.f0_cnum = 2

    def s3_connect_s0(self):
        """Show screen0 and feed it detector 3's frames, detaching the
        detector (1/2/4) that currently feeds it, if any."""
        self.container.hide()
        self.screen0.show()
        current = {1: self.det_thread1,
                   2: self.det_thread2,
                   4: self.det_thread4}.get(self.f0_cnum)
        if current is not None:
            current.send_img.disconnect(self.f0)
        self.det_thread3.send_img.connect(self.f0)
        self.f0_cnum = 3

    def s4_connect_s0(self):
        """Show screen0 and feed it detector 4's frames, detaching the
        detector (1/2/3) that currently feeds it, if any."""
        self.container.hide()
        self.screen0.show()
        current = {1: self.det_thread1,
                   2: self.det_thread2,
                   3: self.det_thread3}.get(self.f0_cnum)
        if current is not None:
            current.send_img.disconnect(self.f0)
        self.det_thread4.send_img.connect(self.f0)
        self.f0_cnum = 4

    # Load persisted settings from the configuration file.
    def load_setting(self):
        """Load iou/conf/rate/check settings from ./config/setting.json.

        Creates the file with defaults when it is missing, falls back to the
        defaults when the stored object does not have the expected 5 keys,
        then pushes the values into the spin boxes / checkbox and the four
        detector threads.
        """
        config_file = './config/setting.json'
        defaults = {"iou": 0.26, "conf": 0.33, "rate": 10, "check": 0, "savecheck": 0}
        if not os.path.exists(config_file):
            config = dict(defaults)
            # Ensure ./config exists before the first write; the original
            # open(..., 'w') crashed when the directory was missing.
            os.makedirs(os.path.dirname(config_file), exist_ok=True)
            with open(config_file, 'w', encoding='utf-8') as f:
                f.write(json.dumps(config, ensure_ascii=False, indent=2))
        else:
            # Close the handle deterministically; the original
            # json.load(open(...)) leaked the file object.
            with open(config_file, 'r', encoding='utf-8') as f:
                config = json.load(f)
            if len(config) != 5:
                # Unexpected schema: keep the file but run with defaults.
                config = dict(defaults)
        iou = config['iou']
        conf = config['conf']
        rate = config['rate']
        check = config['check']
        self.believeDoubleSpinBox.setValue(conf)
        self.iouDoubleSpinBox.setValue(iou)
        self.delaySpinBox.setValue(rate)
        self.delayCheckBox.setCheckState(check)
        # Whether frame-rate throttling (delay) is enabled on each detector.
        for thread in (self.det_thread1, self.det_thread2,
                       self.det_thread3, self.det_thread4):
            thread.rate_check = check

        # self.saveCheckBox.setCheckState(savecheck)
        # self.is_save()                              # auto-save toggle (disabled)

    def change_val(self, x, flag):
        """Sync a changed control value to its twin widget and, where relevant,
        push the new value into the detector threads or the plot thread.

        *flag* names the widget that originated the change.
        """
        workers = (self.det_thread1, self.det_thread2,
                   self.det_thread3, self.det_thread4)
        if flag == 'believeDoubleSpinBox':
            self.believeHorizontalSlider.setValue(int(x * 100))
        elif flag == 'believeHorizontalSlider':
            self.believeDoubleSpinBox.setValue(x / 100)
            for worker in workers:
                worker.conf_thres = x / 100
        elif flag == 'iouDoubleSpinBox':
            self.iouHorizontalSlider.setValue(int(x * 100))
        elif flag == 'iouHorizontalSlider':
            self.iouDoubleSpinBox.setValue(x / 100)
            for worker in workers:
                worker.iou_thres = x / 100
        elif flag == 'delaySpinBox':
            self.delayHorizontalSlider.setValue(x)
        elif flag == 'delayHorizontalSlider':
            self.delaySpinBox.setValue(x)
            for worker in workers:
                worker.rate = x * 10
        elif flag == 'thredDoubleSpinBox':
            # Error threshold goes straight to the plotting thread.
            self.plt_thread.error_thred = x
        elif flag == 'wake':
            self.plt_thread.scatter_modifier.wake_particle = int(x)
        else:
            pass

    def checkrate(self):
        """Mirror the delay checkbox state into every detector's rate_check flag."""
        enabled = bool(self.delayCheckBox.isChecked())
        for worker in (self.det_thread1, self.det_thread2,
                       self.det_thread3, self.det_thread4):
            worker.rate_check = enabled

    def re_connect_event_by_video(self):
        """Wire each of the four input buttons to open a file chooser for its
        detector index (0-3)."""
        def make_slot(idx):
            # Bind idx now; a bare lambda inside the loop would late-bind to 3.
            return lambda: self.open_file(idx)

        buttons = (self.imput1PushButton, self.input2PushButton,
                   self.input3PushButton, self.input4PushButton)
        for idx, button in enumerate(buttons):
            button.clicked.connect(make_slot(idx))

    def re_connect_event_by_stream(self):
        """Disable the file-pick buttons and prompt for stream URLs instead."""
        for button in (self.imput1PushButton, self.input2PushButton,
                       self.input3PushButton, self.input4PushButton):
            button.setDisabled(True)
        for line in (self.lineEdit, self.lineEdit_2,
                     self.lineEdit_3, self.lineEdit_4):
            line.setText("请输入视频流地址")

    def open_file(self, index):
        """Let the user pick a video/image for detection thread *index* (0-3).

        Remembers the last-used folder per index in config/fold.json, shows
        the chosen path in the matching line edit, and aborts the thread's
        current detection loop so the new source takes effect.
        """
        config_file = 'config/fold.json'
        # Close the handle deterministically; the original json.load(open(...))
        # leaked the file object.
        with open(config_file, 'r', encoding='utf-8') as f:
            config = json.load(f)
        open_fold = config['open_fold' + str(index)]
        if not os.path.exists(open_fold):
            open_fold = os.getcwd()
        name, _ = QFileDialog.getOpenFileName(self, '选取视频或图片', open_fold, "Pic File(*.mp4 *.mkv *.avi *.flv "
                                                                          "*.jpg *.png)")
        if name:
            self.thread_pool[index].source = name
            self.statistic_msg('线程' + str(index) + '加载文件：{}'.format(name))
            # Show the chosen path in the line edit belonging to this index.
            line_edits = (self.lineEdit, self.lineEdit_2, self.lineEdit_3, self.lineEdit_4)
            if 0 <= index < len(line_edits):
                edit = line_edits[index]
                edit.clear()
                edit.setText(name)
            # Remember the folder for the next dialog.
            config['open_fold' + str(index)] = os.path.dirname(name)
            with open(config_file, 'w', encoding='utf-8') as f:
                f.write(json.dumps(config, ensure_ascii=False, indent=2))
            # Abort the previous detection loop after switching sources.
            self.thread_pool[index].jump_out = True

    def change_model(self, x):
        """Switch every detection thread to the weight file selected in the
        model combo box and log the change.

        The original loop body assigned ``thread.weights`` four identical
        times (copy-paste duplication); one assignment suffices.
        """
        self.model_type = self.modelComboBox.currentText()
        new_weights = "./pt/%s" % self.model_type
        for thread in self.thread_pool:
            thread.weights = new_weights
        self.statistic_msg('模型切换为%s' % x)

    def search_pt(self):
        """Rescan ./pt for weight files (smallest first) and refresh the
        model combo box when the list changed."""
        found = sorted(
            (name for name in os.listdir('./pt') if name.endswith('.pt')),
            key=lambda name: os.path.getsize('./pt/' + name),
        )
        if found != self.pt_list:
            self.pt_list = found
            self.modelComboBox.clear()
            self.modelComboBox.addItems(found)

    def statistic_msg(self, msg):
        """Append *msg* to the statistics log pane."""
        log_pane = self.statisticpPlainTextEdit
        log_pane.appendPlainText(msg)

    # Abort the detection loops.
    def stop(self):
        """Signal all four detectors to exit their loops, detach whichever
        one currently feeds the main screen, and reset the global counter."""
        detectors = (self.det_thread1, self.det_thread2,
                     self.det_thread3, self.det_thread4)
        for detector in detectors:
            detector.jump_out = True
        # f0_cnum 1-4 identifies the detector wired to the main screen slot.
        if 1 <= self.f0_cnum <= 4:
            detectors[self.f0_cnum - 1].send_img.disconnect(self.f0)
        self.f0_cnum = 0
        global total_count
        total_count = 0

    @staticmethod
    def show_image(img_src, label):
        """Render a BGR frame (H x W x C ndarray) onto *label*, scaled by the
        frame's longer side so it fits the label while keeping aspect ratio.

        Best-effort: any failure is printed rather than raised, so a bad
        frame never kills the GUI update path.
        """
        try:
            ih, iw, _ = img_src.shape
            w = label.geometry().width()
            h = label.geometry().height()
            # Fit by the longer image side.
            if iw > ih:
                nw, nh = w, int(w / iw * ih)
            else:
                nw, nh = int(h / ih * iw), h
            resized = cv2.resize(img_src, (nw, nh))
            rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
            # bytesPerLine = channels * width for a tightly packed RGB888 row.
            qimg = QImage(rgb.data, rgb.shape[1], rgb.shape[0], rgb.shape[2] * rgb.shape[1],
                          QImage.Format_RGB888)
            label.setPixmap(QPixmap.fromImage(qimg))

        except Exception as e:
            print(repr(e))

    # Resume / pause detection.
    def run_or_continue(self):
        """Start or resume all four detectors when the run box is checked;
        pause them otherwise, logging the state either way."""
        workers = (self.det_thread1, self.det_thread2,
                   self.det_thread3, self.det_thread4)
        for worker in workers:
            worker.jump_out = False
        if self.runCheckBox.isChecked():
            for worker in workers:
                worker.is_continue = True
            # Threads are started together on the first run only.
            if not self.det_thread1.isRunning():
                for worker in workers:
                    worker.start()

            source = os.path.basename(self.det_thread1.source)
            source = '摄像头设备' if source.isnumeric() else source
            self.statistic_msg('正在检测 >> 模型：{}，文件：{}'.
                               format(os.path.basename(self.det_thread1.weights),
                                      source))
        else:
            for worker in workers:
                worker.is_continue = False
            self.statistic_msg('暂停')


if __name__ == "__main__":
    app = QApplication(sys.argv)
    myWin = MainWindow()
    myWin.show()
    sys.exit(app.exec_())
