import shutil

import openpyxl
import pymysql
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QMenu, QAction, QDialog, QLabel, QPushButton
from openpyxl.utils import get_column_letter

from setdata_win import setdata_window
from user_win import user_window
from visual_win import visual_window
from main_win.win import Ui_mainWindow
from PyQt5.QtCore import Qt, QPoint, QTimer, QThread, pyqtSignal, QRect, qDebug
from PyQt5.QtGui import QImage, QPixmap, QPainter, QIcon, QCursor

import sys
import os
import json
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import os
import time
import cv2

from models.experimental import attempt_load
from utils.datasets import LoadImages, LoadWebcam
from utils.CustomMessageBox import MessageBox
from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \
    apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
# from utils.plots import colors, plot_one_box, plot_one_box_PIL
from utils.plots import Annotator, colors, save_one_box

from utils.torch_utils import select_device
from utils.capnums import Camera
from dialog.rtsp_win import Window

import os, PyQt5
# Point Qt at the platform plugins bundled inside the installed PyQt5 package.
# NOTE(review): presumably a workaround for the "could not find or load the Qt
# platform plugin" start-up error on some installs — confirm before removing.
dirname = os.path.dirname(PyQt5.__file__)
qt_dir = os.path.join(dirname, 'Qt5', 'plugins', 'platforms')
os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = qt_dir



# Multithreading programming
class DetThread(QThread):
    """Background YOLOv5 detection worker.

    Pulls frames from the configured source (image/video file, local camera
    index or rtsp/rtmp/http stream), runs inference and reports everything to
    the GUI exclusively through Qt signals.
    """

    # annotated result frame
    send_img = pyqtSignal(np.ndarray)
    # raw (un-annotated) input frame
    send_raw = pyqtSignal(np.ndarray)
    # per-class detection counts plus an emission sequence number
    send_statistic = pyqtSignal(dict, int)
    # status text: detecting/pause/stop/finished/error message
    send_msg = pyqtSignal(str)
    # progress bar position
    send_percent = pyqtSignal(int)
    # frames-per-second text
    send_fps = pyqtSignal(str)

    def __init__(self):
        super(DetThread, self).__init__()
        self.weights = './yolov5s.pt'         # weights requested by the GUI
        self.current_weight = './yolov5s.pt'  # weights currently loaded
        self.source = '0'                     # file path, camera index or stream URL
        self.conf_thres = 0.25                # confidence threshold
        self.iou_thres = 0.45                 # NMS IoU threshold
        self.jump_out = False                 # set True to leave the detect loop
        self.is_continue = True               # continue/pause flag
        self.percent_length = 1000            # progress bar full-scale value
        self.rate_check = True                # whether the per-frame delay is enabled
        self.rate = 100                       # delay factor: sleeps 1/rate seconds
        self.save_fold = './result'           # output folder; falsy disables saving
        self.statistic_dic = {}               # running per-class maxima
        self.dataCnt = 1                      # number of statistic emissions so far

    def _load_model(self, device, imgsz, half):
        """Load ``self.weights`` and prepare the model for inference.

        Returns ``(model, stride, imgsz, names)`` with ``imgsz`` adjusted to
        a multiple of the model stride.  Shared by initial load and hot-swap.
        """
        model = attempt_load(self.weights, map_location=device)  # load FP32 model
        stride = int(model.stride.max())  # model stride
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        # class names (handles DataParallel-wrapped models)
        names = model.module.names if hasattr(model, 'module') else model.names
        if half:
            model.half()  # to FP16
        # warm-up pass so the first real frame is not slowed by lazy init
        if device.type != 'cpu':
            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))
        return model, stride, imgsz, names

    # @torch.no_grad() disables gradient tracking for the whole method: this
    # is inference only, so it reduces memory use and speeds execution up.
    @torch.no_grad()
    def run(self,
            imgsz=640,  # inference size (pixels)
            max_det=1000,  # maximum detections per image
            device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
            view_img=True,  # show results
            save_txt=False,  # save results to *.txt
            save_conf=False,  # save confidences in --save-txt labels
            save_crop=False,  # save cropped prediction boxes
            nosave=False,  # do not save images/videos
            classes=None,  # filter by class: --class 0, or --class 0 2 3
            agnostic_nms=False,  # class-agnostic NMS
            augment=False,  # augmented inference
            visualize=False,  # visualize features
            update=False,  # update all models
            project='runs/detect',  # save results to project/name
            name='exp',  # save results to project/name
            exist_ok=False,  # existing project/name ok, do not increment
            line_thickness=3,  # bounding box thickness (pixels)
            hide_labels=False,  # hide labels
            hide_conf=False,  # hide confidences
            half=False,  # use FP16 half-precision inference
            ):
        """Main detection loop; any exception is reported via ``send_msg``."""
        try:
            device = select_device(device)
            # half precision only supported on CUDA
            half &= device.type != 'cpu'

            # Load model (includes the GPU warm-up pass)
            model, stride, imgsz, names = self._load_model(device, imgsz, half)

            # Dataloader: webcam/stream sources vs. local files
            if self.source.isnumeric() or self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')):
                view_img = check_imshow()
                # set True to speed up constant image size inference
                cudnn.benchmark = True
                dataset = LoadWebcam(self.source, img_size=imgsz, stride=stride)
            else:
                dataset = LoadImages(self.source, img_size=imgsz, stride=stride)

            count = 0                  # frames processed
            start_time = time.time()   # start of the current fps window
            startt = time.time()       # start of the current 1-second statistics window
            dataset = iter(dataset)
            # Loop execution
            while True:
                # stop requested by the GUI
                if self.jump_out:
                    # fix: self.vid_cap only exists after the first frame has
                    # been read, and is None for plain images — guard both
                    # cases instead of raising AttributeError/TypeError here
                    if getattr(self, 'vid_cap', None):
                        self.vid_cap.release()
                    self.send_percent.emit(0)
                    self.send_msg.emit('Stop')
                    if hasattr(self, 'out'):
                        self.out.release()
                    break
                # the GUI selected different weights: hot-swap the model
                if self.current_weight != self.weights:
                    model, stride, imgsz, names = self._load_model(device, imgsz, half)
                    self.current_weight = self.weights
                if self.is_continue:
                    path, img, im0s, self.vid_cap = next(dataset)
                    count += 1
                    # fps averaged over the last 30 frames
                    if count % 30 == 0 and count >= 30:
                        fps = int(30 / (time.time() - start_time))
                        # send fps msg
                        self.send_fps.emit('fps：' + str(fps))
                        start_time = time.time()
                    # progress: frame position for videos, full bar otherwise
                    if self.vid_cap:
                        percent = int(count / self.vid_cap.get(cv2.CAP_PROP_FRAME_COUNT) * self.percent_length)
                        self.send_percent.emit(percent)
                    else:
                        percent = self.percent_length
                    # per-frame detection counters
                    newname = {name: 0 for name in names}
                    img = torch.from_numpy(img).to(device)
                    # uint8 to fp16/32
                    img = img.half() if half else img.float()
                    img /= 255.0  # 0 - 255 to 0.0 - 1.0
                    if img.ndimension() == 3:
                        img = img.unsqueeze(0)
                    pred = model(img, augment=augment)[0]
                    # Apply NMS
                    pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes, agnostic_nms,
                                               max_det=max_det)
                    # Process detections
                    for i, det in enumerate(pred):  # detections per image
                        im0 = im0s.copy()
                        annotator = Annotator(im0, line_width=line_thickness, example=str(names))
                        if len(det):
                            # Rescale boxes from img_size to im0 size
                            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                            # Draw boxes and count per-class hits
                            for *xyxy, conf, cls in reversed(det):
                                c = int(cls)  # integer class
                                newname[names[c]] += 1
                                label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                                annotator.box_label(xyxy, label, color=colors(c, True))
                    # optional throttling between frames
                    if self.rate_check:
                        time.sleep(1 / self.rate)
                    # emit annotated frame, then the raw frame
                    im0 = annotator.result()
                    self.send_img.emit(im0)
                    self.send_raw.emit(im0s if isinstance(im0s, np.ndarray) else im0s[0])
                    # keep the per-class maximum seen so far
                    for key in newname.keys():
                        if key in self.statistic_dic.keys():
                            self.statistic_dic[key] = max(self.statistic_dic[key], newname[key])
                        else:
                            self.statistic_dic[key] = newname[key]
                    # emit the text statistics once per second
                    if time.time() - startt > 1:
                        startt = time.time()
                        self.dataCnt += 1
                        self.send_statistic.emit(self.statistic_dic, self.dataCnt)
                    # single image: emit immediately with sequence number 2
                    if self.vid_cap is None:
                        self.send_statistic.emit(self.statistic_dic, 2)
                    # persist the detection result
                    if self.save_fold:
                        os.makedirs(self.save_fold, exist_ok=True)
                        if self.vid_cap is None:
                            # single image -> timestamped .jpg
                            save_path = os.path.join(self.save_fold,
                                                     time.strftime('%Y_%m_%d_%H_%M_%S',
                                                                   time.localtime()) + '.jpg')
                            # use opencv to guarantee clarity
                            cv2.imwrite(save_path, im0)
                        else:
                            # video: lazily open the writer on the first frame
                            if count == 1:
                                ori_fps = int(self.vid_cap.get(cv2.CAP_PROP_FPS))
                                if ori_fps == 0:
                                    ori_fps = 25  # fall back when fps is unknown
                                width, height = im0.shape[1], im0.shape[0]
                                save_path = os.path.join(self.save_fold,
                                                         time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime()) + '.mp4')
                                self.out = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), ori_fps,
                                                           (width, height))
                            self.out.write(im0)
                    # whole video processed
                    if percent == self.percent_length:
                        self.send_percent.emit(0)
                        self.send_msg.emit('finished')
                        if hasattr(self, 'out'):
                            self.out.release()
                        break
        except Exception as e:
            self.send_msg.emit('%s' % e)


# Maximum distance (pixels) between the cursor and the window border at which
# the custom resize handling is triggered.
PADDING = 10


class MainWindow(QMainWindow, Ui_mainWindow):
    # Class-level MySQL connection shared by all instances.
    # NOTE(review): credentials are hard-coded and the connection is opened at
    # class-definition time — if the server is unreachable, merely importing
    # this module fails. Consider moving this into __init__ and a config file.
    mysql = pymysql.connect(
        host='127.0.0.1',
        user='root',
        password='123456',
        port=3306,
        db='imagerecognition',
        autocommit=True,
    )

    # cursor used to run queries against the database
    cursor = mysql.cursor()
    # look a user up by primary key
    sql = "SELECT * FROM users WHERE id=%s"
    # emitted when an action requires login but no user is logged in
    send_login = pyqtSignal()

    # one-shot flags so each tip dialog is shown at most once per run
    tip_time1 = 0
    tip_time2 = 0
    tip_time3 = 0
    tip_time4 = 0

    def __init__(self, parent=None):
        """Assemble the main window: UI widgets, login state, child windows,
        the detection thread and all signal/slot wiring."""
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        # user id of the logged-in user (defaults to 1)
        self.userid = 1
        # 1 -> logged in, 0 -> logged out
        self.checklog = 1
        # logout button signal connect to slot
        self.logout.clicked.connect(self.log_out)
        # userdata button signal connect to slot
        self.userdata.clicked.connect(self.show_userdata)
        # child window that displays the user's data
        self.userdata_win = user_window()
        # child window that edits the user's data
        self.setdata_win = setdata_window()
        # "set data" button inside the userdata window opens the editor
        self.userdata_win.pushButton.clicked.connect(self.setdata_win.show)
        # refresh the displayed data after an edit
        self.setdata_win.send_change.connect(self.loaduserdata)

        # (translated note) The block above wires the UI, the login state and
        # the child windows used to show and edit user data — the usual Qt
        # pattern of reacting to user interaction via signals and slots.

        # no border and transparent
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setWindowFlags(Qt.FramelessWindowHint)
        # border width
        self.SHADOW_WIDTH = 0
        # whether the left mouse button is currently pressed
        self.isLeftPressDown = False
        # cursor position recorded while dragging
        self.dragPosition = 0
        # original mouse state — presumably 8 means "no edge"; TODO confirm
        self.dir = 8
        self.setMouseTracking(True)

        # (translated note) The window is frameless and transparent and tracks
        # mouse moves so custom drag/resize behaviour can be implemented.

        # minButton signal connect to slot
        self.minButton.clicked.connect(self.showMinimized)
        # maxButton signal connect to slot
        self.maxButton.clicked.connect(self.max_or_restore)
        # show Maximized window
        # self.maxButton.animateClick(10)
        self.closeButton.clicked.connect(self.close)

        # (translated note) archived 7.4

        # one-shot timer that clears the status label
        self.qtimer = QTimer(self)
        self.qtimer.setSingleShot(True)
        self.qtimer.timeout.connect(lambda: self.statistic_label.clear())

        # search models automatically
        # clear the comboBox of models
        self.comboBox.clear()
        # collect the .pt files under ./pt
        self.pt_list = os.listdir('./pt')
        self.pt_list = [file for file in self.pt_list if file.endswith('.pt')]
        self.pt_list.sort(key=lambda x: os.path.getsize('./pt/' + x))
        # add the pts to comboBox
        self.comboBox.clear()
        self.comboBox.addItems(self.pt_list)
        # rescan ./pt every 2 seconds
        self.qtimer_search = QTimer(self)
        self.qtimer_search.timeout.connect(lambda: self.search_pt())
        self.qtimer_search.start(2000)

        # include a visual window
        self.visual_window = visual_window()
        self.popup_dialog = None

        # yolov5 thread
        self.det_thread = DetThread()
        # set model type
        self.model_type = self.comboBox.currentText()
        # set model file path
        self.det_thread.weights = "./pt/%s" % self.model_type
        # set source type default: camera
        self.det_thread.source = '0'
        # set progress bar full-scale value
        self.det_thread.percent_length = self.progressBar.maximum()
        # show raw img/video
        self.det_thread.send_raw.connect(lambda x: self.show_image(x, self.raw_video))
        # show result img/video
        self.det_thread.send_img.connect(lambda x: self.show_image(x, self.out_video))
        # refresh visual img
        self.det_thread.send_statistic.connect(self.visual_window.refresh)
        # show text data and save
        self.det_thread.send_statistic.connect(self.show_statistic)
        # show msg
        self.det_thread.send_msg.connect(lambda x: self.show_msg(x))
        # show process bar pos
        self.det_thread.send_percent.connect(lambda x: self.progressBar.setValue(x))
        # show fps
        self.det_thread.send_fps.connect(lambda x: self.fpslabel.setText(x))

        # import localhost file
        self.fileButton.clicked.connect(self.open_file)
        # import camera
        self.cameraButton.clicked.connect(self.chose_cam)
        # import live streaming
        self.videoButton.clicked.connect(self.chose_rtsp)
        # choose export file type
        self.exportbox.clicked.connect(self.chose_tool)
        # start / pause / continue
        self.runButton.clicked.connect(self.run_or_continue)
        # stop
        self.stopButton.clicked.connect(self.stop)
        # open visual window
        self.visual.clicked.connect(self.open_visualwin)

        # change model
        self.comboBox.currentTextChanged.connect(self.change_model)
        # change conf dynamically by spinbox
        self.confSpinBox.valueChanged.connect(lambda x: self.change_val(x, 'confSpinBox'))
        # change conf dynamically by slider
        self.confSlider.valueChanged.connect(lambda x: self.change_val(x, 'confSlider'))
        # change iou dynamically by spinbox
        self.iouSpinBox.valueChanged.connect(lambda x: self.change_val(x, 'iouSpinBox'))
        # change iou dynamically by slider
        self.iouSlider.valueChanged.connect(lambda x: self.change_val(x, 'iouSlider'))
        # change delay dynamically by spinbox
        self.rateSpinBox.valueChanged.connect(lambda x: self.change_val(x, 'rateSpinBox'))
        # change delay dynamically by slider
        self.rateSlider.valueChanged.connect(lambda x: self.change_val(x, 'rateSlider'))

        # use delay or not
        self.checkBox.clicked.connect(self.checkrate)
        # default save files
        self.is_save()
        self.load_setting()
        # show the intro tip dialog once, 5 s after start-up
        if MainWindow.tip_time1 == 0:
            MainWindow.tip_time1 += 1
            QTimer.singleShot(5000, self.show_popup_dialog3)

    def loaduserdata(self):
        """Refresh the displayed user name after a login or a profile edit,
        and propagate the current user id to both child windows."""
        # mark the session as logged in
        self.checklog = 1
        # hand the id to the editor window and the display window
        self.setdata_win.userid = self.userid
        self.userdata_win.userid = self.userid
        # look the user up in MySQL and show the name when a row is found
        self.cursor.execute(self.sql, self.userid)
        row = self.cursor.fetchone()
        if row is not None:
            self.username.setText(row[1])
        # reload the user-data window's text fields
        self.userdata_win.load()

    def show_userdata(self):
        """Open the user-data window, or prompt for login when logged out."""
        if self.checklog != 1:
            # not logged in: show a hint, then ask the login window to open
            MessageBox(self.closeButton, title='Tips', text='Please Login!', time=1500, auto=True).exec_()
            self.send_login.emit()
            return
        # logged in: refresh and show the user-data window
        self.userdata_win.userid = self.userid
        self.userdata_win.load()
        self.userdata_win.show()

    def log_out(self):
        """Log the current user out and restore the placeholder user name."""
        MessageBox(self.closeButton, title='Tips', text='Logout Successfully!', time=1500, auto=True).exec_()
        # flip the session flag to "logged out"
        self.checklog = 0
        # reset the displayed name to the placeholder
        self.username.setText('UserName')

    # popup menu offering the four export formats
    def chose_tool(self):
        """Show a popup menu letting the user export the detection result as
        an image, a txt file, an excel sheet or the visual chart."""
        try:
            # stop detection first
            self.stop()
            # use a menu to show the options
            popMenu = QMenu()
            popMenu.setFixedWidth(140)
            # four export types
            img_tool = QAction('Result_img')
            txt_tool = QAction('Result_txt')
            excel_tool = QAction('Result_excel')
            # fix: label used to read 'Result_vistal' (user-visible typo);
            # the dispatch below uses the else branch, so behaviour is safe
            visual_tool = QAction('Result_visual')
            # add actions for options
            popMenu.addAction(img_tool)
            popMenu.addAction(txt_tool)
            popMenu.addAction(excel_tool)
            popMenu.addAction(visual_tool)
            # set style sheet
            popMenu.setStyleSheet('''
                                            QMenu {
                                            font-size: 16px;
                                            font-family: "Microsoft YaHei UI";
                                            font-weight: light;
                                            color:white;
                                            padding-left: 5px;
                                            padding-right: 5px;
                                            padding-top: 4px;
                                            padding-bottom: 4px;
                                            border-style: solid;
                                            border-width: 0px;
                                            border-color: rgba(255, 255, 255, 255);
                                            border-radius: 10px;
                                            background-color: rgba(200, 200, 200,50);}
                                            ''')
            # place the menu next to the camera button
            x = self.groupBox_5.mapToGlobal(self.cameraButton.pos()).x()
            y = self.groupBox_5.mapToGlobal(self.cameraButton.pos()).y() + 220
            x = x + self.cameraButton.frameGeometry().width()
            pos = QPoint(x, y)
            action = popMenu.exec_(pos)
            # dispatch on the chosen option
            if action:
                self.statistic_msg('Loading ：{}'.format(action.text()))
                if action.text() == 'Result_img':
                    self.export('./result')
                elif action.text() == 'Result_txt':
                    self.export('./labels')
                elif action.text() == 'Result_excel':
                    self.export('./excel')
                # the visual chart is exported via the visual window's API
                else:
                    self.visual_window.export()
        except Exception as e:
            self.statistic_msg('%s' % e)

    # copy the newest saved result file to a user-selected location
    def export(self, path):
        """Copy the most recently modified file under *path* to a location
        the user picks in a save dialog.

        path: folder holding previously saved results ('./result', './labels',
        './excel').
        """
        lists = os.listdir(path)
        # fix: nothing saved yet — tell the user instead of crashing on lists[-1]
        if not lists:
            MessageBox(self.closeButton, title='Tips', text='Nothing to export yet', time=1500, auto=True).exec_()
            return
        # newest file = largest modification time
        lists.sort(key=lambda x: os.path.getmtime(os.path.join(path, x)))
        refile = lists[-1]
        # let the user pick a destination
        filepath, _ = QFileDialog.getSaveFileName(self, 'Save Result', refile, "*.*")
        # fix: the user cancelled the dialog — filepath is empty, don't copy
        if not filepath:
            return
        shutil.copy(os.path.join(path, refile), filepath)
        # pop out a prompting window
        MessageBox(self.closeButton, title='Tips', text='Save Successfully', time=1500, auto=True).exec_()

    # rescan ./pt for weight files and refresh the combo box on change
    def search_pt(self):
        """Poll ./pt for .pt weight files; update the model combo box only
        when the set of files has changed."""
        found = [f for f in os.listdir('./pt') if f.endswith('.pt')]
        found.sort(key=lambda fname: os.path.getsize('./pt/' + fname))
        if found != self.pt_list:
            # the folder contents changed: rebuild the combo box
            self.pt_list = found
            self.comboBox.clear()
            self.comboBox.addItems(self.pt_list)

    # images/videos are saved under ./result by default
    def is_save(self):
        """Point the detection thread's save folder at ./result."""
        self.det_thread.save_fold = './result'

    # enable/disable the per-frame delay
    def checkrate(self):
        """Mirror the delay checkbox state into the detection thread."""
        self.det_thread.rate_check = self.checkBox.isChecked()

    # open the visualisation window
    def open_visualwin(self):
        """Show the visual (chart) child window."""
        self.visual_window.show()

    def chose_rtsp(self):
        """Let the user pick an RTSP/stream address; the last address is
        remembered in config/ip.json."""
        # a new detection run: reset all export files
        self.resetOutFile()
        self.rtsp_window = Window()
        config_file = 'config/ip.json'
        default_ip = "rtsp://admin:admin888@192.168.1.67:555"
        if not os.path.exists(config_file):
            # no config yet: write one with the default address
            ip = default_ip
            new_json = json.dumps({"ip": ip}, ensure_ascii=False, indent=2)
            with open(config_file, 'w', encoding='utf-8') as f:
                f.write(new_json)
        else:
            # fix: close the file handle (json.load(open(...)) leaked it) and
            # fall back to the default if the 'ip' key is missing (KeyError)
            with open(config_file, 'r', encoding='utf-8') as f:
                config = json.load(f)
            ip = config.get('ip', default_ip)
        # preload the address into the stream-chooser window and show it
        self.rtsp_window.rtspEdit.setText(ip)
        self.rtsp_window.show()
        # apply the typed address when the button is pressed
        self.rtsp_window.rtspButton.clicked.connect(lambda: self.load_rtsp(self.rtsp_window.rtspEdit.text()))
        # show the tip dialog once
        if MainWindow.tip_time2 == 0:
            MainWindow.tip_time2 += 1
            QTimer.singleShot(4000, self.show_popup_dialog1)

    # apply the address typed in the rtsp window to the detection thread
    def load_rtsp(self, ip):
        """Use *ip* as the detection source and persist it to config/ip.json."""
        try:
            # halt any running detection before switching sources
            self.stop()
            # brief feedback popup
            MessageBox(
                self.closeButton, title='Tips', text='Loading live stream', time=1000, auto=True).exec_()
            self.det_thread.source = ip
            # remember the address for next time
            payload = json.dumps({"ip": ip}, ensure_ascii=False, indent=2)
            with open('config/ip.json', 'w', encoding='utf-8') as f:
                f.write(payload)
            # status bar info
            self.statistic_msg('Loading stream {}'.format(ip))
            self.rtsp_window.close()
        except Exception as e:
            self.statistic_msg('%s' % e)

    def chose_cam(self):
        """Enumerate local cameras and let the user pick one as the source."""
        # a new detection run: reset all export files
        self.resetOutFile()
        try:
            # stop the previous detection first
            self.stop()
            # brief feedback popup
            MessageBox(
                self.closeButton, title='Tips', text='Loading camera', time=2000, auto=True).exec_()
            # enumerate the local cameras
            _, cams = Camera().get_cam_num()
            # use a menu to show the camera options
            popMenu = QMenu()
            popMenu.setFixedWidth(50)
            popMenu.setStyleSheet('''
                                            QMenu {
                                            font-size: 16px;
                                            font-family: "Microsoft YaHei UI";
                                            font-weight: light;
                                            color:white;
                                            padding-left: 5px;
                                            padding-right: 5px;
                                            padding-top: 4px;
                                            padding-bottom: 4px;
                                            border-style: solid;
                                            border-width: 0px;
                                            border-color: rgba(255, 255, 255, 255);
                                            border-radius: 10px;
                                            background-color: rgba(200, 200, 200,50);}
                                            ''')
            # fix: replace exec()-built dynamic variables with a plain list;
            # the list also keeps the QAction objects alive until exec_ returns
            actions = []
            for cam in cams:
                act = QAction(str(cam))
                actions.append(act)
                popMenu.addAction(act)
            # place the menu next to the camera button
            x = self.groupBox_5.mapToGlobal(self.cameraButton.pos()).x()
            y = self.groupBox_5.mapToGlobal(self.cameraButton.pos()).y() - 50
            x = x + self.cameraButton.frameGeometry().width()
            pos = QPoint(x, y)
            action = popMenu.exec_(pos)
            # apply the chosen camera index
            if action:
                self.det_thread.source = action.text()
                self.statistic_msg('Loading camera：{}'.format(action.text()))
        except Exception as e:
            self.statistic_msg('%s' % e)
        # show the tip dialog once
        if MainWindow.tip_time2 == 0:
            MainWindow.tip_time2 += 1
            QTimer.singleShot(4000, self.show_popup_dialog1)

    # load the persisted detection parameters (iou/conf/rate/delay-check)
    def load_setting(self):
        """Read config/setting.json (creating it with defaults if absent) and
        push the values into the spin boxes, checkbox and detection thread."""
        config_file = 'config/setting.json'
        # config file missing: create one with defaults
        if not os.path.exists(config_file):
            iou = 0.26
            conf = 0.33
            rate = 10
            check = 0
            new_config = {"iou": iou,
                          "conf": conf,
                          "rate": rate,
                          "check": check,
                          }
            new_json = json.dumps(new_config, ensure_ascii=False, indent=2)
            with open(config_file, 'w', encoding='utf-8') as f:
                f.write(new_json)
        # config file exists
        else:
            # fix: close the handle (json.load(open(...)) leaked it)
            with open(config_file, 'r', encoding='utf-8') as f:
                config = json.load(f)
            # incomplete config: fall back to defaults
            if len(config) != 4:
                iou = 0.5
                conf = 0.6
                rate = 10
                check = 0
            # load the parameters stored in the file
            else:
                iou = config['iou']
                conf = config['conf']
                rate = config['rate']
                check = config['check']
        # apply the parameters to the widgets and the thread
        self.confSpinBox.setValue(conf)
        self.iouSpinBox.setValue(iou)
        self.rateSpinBox.setValue(rate)
        self.checkBox.setCheckState(check)
        self.det_thread.rate_check = check
        self.is_save()

    # keep each spin box/slider pair in sync and update the thread
    def change_val(self, x, flag):
        """Synchronise a spin box with its slider (and vice versa) and push
        the resulting value into the detection thread where applicable."""
        if flag == 'confSpinBox':
            self.confSlider.setValue(int(x * 100))
            return
        if flag == 'confSlider':
            self.confSpinBox.setValue(x / 100)
            self.det_thread.conf_thres = x / 100
            return
        if flag == 'iouSpinBox':
            self.iouSlider.setValue(int(x * 100))
            return
        if flag == 'iouSlider':
            self.iouSpinBox.setValue(x / 100)
            self.det_thread.iou_thres = x / 100
            return
        if flag == 'rateSpinBox':
            self.rateSlider.setValue(x)
            return
        if flag == 'rateSlider':
            self.rateSpinBox.setValue(x)
            self.det_thread.rate = x * 10

    # show a message on the status bar label
    def statistic_msg(self, msg):
        """Write *msg* to the status label (the auto-clear timer is disabled)."""
        self.statistic_label.setText(msg)
        # self.qtimer.start(3000)

    # show a message coming from the detection thread on the status bar
    def show_msg(self, msg):
        """Display *msg* and un-press the run button (the thread reports
        pause/stop/finished/error through this slot)."""
        # fix: setChecked expects a bool; Qt.Unchecked only worked because it
        # happens to equal 0
        self.runButton.setChecked(False)
        self.statistic_msg(msg)

    # switch weights when the combo-box selection changes
    def change_model(self, x):
        """Point the detection thread at the newly selected weight file."""
        chosen = self.comboBox.currentText()
        self.model_type = chosen
        self.det_thread.weights = "./pt/%s" % chosen
        # status bar feedback
        self.statistic_msg('Change model to %s' % x)

    # import a local image/video file as the detection source
    def open_file(self):
        """Pick a local video or image and set it as the detection source;
        the last-used folder is persisted in config/fold.json."""
        # a new detection run: reset all export files
        self.resetOutFile()
        config_file = 'config/fold.json'
        # fix: the config file itself may be missing — fall back to a default
        # instead of letting json.load(open(...)) raise FileNotFoundError
        # (and close the handle while we're at it)
        if os.path.exists(config_file):
            with open(config_file, 'r', encoding='utf-8') as f:
                config = json.load(f)
        else:
            config = {'open_fold': os.getcwd()}
        open_fold = config.get('open_fold', os.getcwd())
        # stored folder may have been deleted: use the working directory
        if not os.path.exists(open_fold):
            open_fold = os.getcwd()
        # let the user pick a file
        name, _ = QFileDialog.getOpenFileName(self, 'Video/image', open_fold, "Pic File(*.mp4 *.mkv *.avi *.flv "
                                                                              "*.jpg *.png)")
        if name:
            # set the detection source
            self.det_thread.source = name
            self.statistic_msg('Loaded file：{}'.format(os.path.basename(name)))
            # remember the folder for next time
            config['open_fold'] = os.path.dirname(name)
            config_json = json.dumps(config, ensure_ascii=False, indent=2)
            with open(config_file, 'w', encoding='utf-8') as f:
                f.write(config_json)
            self.stop()
        # show the tip dialog once
        if MainWindow.tip_time2 == 0:
            MainWindow.tip_time2 += 1
            QTimer.singleShot(4000, self.show_popup_dialog1)
    def max_or_restore(self):
        """Toggle between maximized and normal window state via the max button."""
        if not self.maxButton.isChecked():
            self.showNormal()
        else:
            self.showMaximized()

    def run_or_continue(self):
        """Start/resume detection when the run button is checked, pause otherwise."""
        # clear the abort flag before (re)starting
        self.det_thread.jump_out = False
        if not self.runButton.isChecked():
            self.det_thread.is_continue = False
            self.statistic_msg('Pause')
        else:
            self.det_thread.is_continue = True
            if not self.det_thread.isRunning():
                self.det_thread.start()
            src_name = os.path.basename(self.det_thread.source)
            if src_name.isnumeric():
                src_name = 'camera'
            self.statistic_msg('Detecting >> model：{}，file：{}'.
                               format(os.path.basename(self.det_thread.weights),
                                      src_name))
        # one-shot tutorial popups
        if MainWindow.tip_time3 == 0:
            MainWindow.tip_time3 += 1
            QTimer.singleShot(4000, self.show_popup_dialog2)
        if MainWindow.tip_time4 == 0:
            MainWindow.tip_time4 += 1
            QTimer.singleShot(8000, self.show_popup_dialog4)

    def stop(self):
        """Signal the detection thread to stop by raising its jump_out flag."""
        self.det_thread.jump_out = True

    # tell the position of the cursor relative to the window ->show different states
    # tell the position of the cursor relative to the window -> show different states
    def region(self, cursorGlobalPoint):
        """Classify the cursor position against the window edges.

        Sets ``self.dir`` (0=top, 1=bottom, 2=left, 3=right, 4=top-left,
        5=bottom-left, 6=bottom-right, 7=top-right, 8=interior) and updates
        the cursor shape accordingly.
        """
        # window rectangle in global (screen) coordinates
        rect = self.rect()
        tl = self.mapToGlobal(rect.topLeft())
        rb = self.mapToGlobal(rect.bottomRight())
        x = cursorGlobalPoint.x()
        y = cursorGlobalPoint.y()

        # PADDING-wide bands along each edge
        near_left = tl.x() <= x <= tl.x() + PADDING
        near_right = rb.x() - PADDING <= x <= rb.x()
        near_top = tl.y() <= y <= tl.y() + PADDING
        near_bottom = rb.y() - PADDING <= y <= rb.y()

        # corners take priority over plain edges
        if near_left and near_top:
            self.dir = 4
            shape = Qt.SizeFDiagCursor
        elif near_right and near_bottom:
            self.dir = 6
            shape = Qt.SizeFDiagCursor
        elif near_left and near_bottom:
            self.dir = 5
            shape = Qt.SizeBDiagCursor
        elif near_right and near_top:
            self.dir = 7
            shape = Qt.SizeBDiagCursor
        elif near_left:
            self.dir = 2
            shape = Qt.SizeHorCursor
        elif near_right:
            self.dir = 3
            shape = Qt.SizeHorCursor
        elif near_top:
            self.dir = 0
            shape = Qt.SizeVerCursor
        elif near_bottom:
            self.dir = 1
            shape = Qt.SizeVerCursor
        else:
            # interior: no resize, default arrow
            self.dir = 8
            shape = Qt.ArrowCursor
        self.setCursor(QCursor(shape))

    # rewrite the events about mouse
    # rewrite the events about mouse
    def mouseReleaseEvent(self, event):
        """On left-button release, end any resize grab and restore the cursor."""
        if event.button() != Qt.LeftButton:
            return
        self.isLeftPressDown = False
        if self.dir != 8:
            self.releaseMouse()
            # restore the default cursor shape
            self.setCursor(QCursor(Qt.ArrowCursor))

    def mousePressEvent(self, event):
        """On left-button press, begin either a resize grab or a window drag.

        ``self.dir`` was set by region(): 8 means the cursor is in the window
        interior (drag), anything else is an edge/corner (resize).
        """
        if event.button() == Qt.LeftButton:
            self.isLeftPressDown = True
            if self.dir != 8:
                # BUG FIX: the original called self.mouseGrabber(), which is a
                # static query that merely RETURNS the current grabber widget.
                # To actually start the grab (released via releaseMouse() in
                # mouseReleaseEvent) we must call grabMouse().
                self.grabMouse()
            else:
                # plain move: remember the cursor offset from the window origin
                self.dragPosition = event.globalPos() - self.frameGeometry().topLeft()

    def mouseMoveEvent(self, event):
        """Resize (when on an edge/corner) or drag the frameless window.

        ``self.dir`` was set by region(): 0=top, 1=bottom, 2=left, 3=right,
        4=top-left, 5=bottom-left, 6=bottom-right, 7=top-right, 8=interior.
        """
        gloPoint = event.globalPos()
        rect = self.rect()
        tl = self.mapToGlobal(rect.topLeft())
        rb = self.mapToGlobal(rect.bottomRight())

        # no button held: only update self.dir and the cursor shape
        if not self.isLeftPressDown:
            self.region(gloPoint)
        # left button held: resize or move
        else:
            # resize: edit the geometry rectangle edge by edge
            if self.dir != 8:
                remove = QRect(tl, rb)
                if self.dir == 2:
                    # left edge: clamp so the width never drops below minimum
                    if rb.x() - gloPoint.x() <= self.minimumWidth():
                        remove.setX(tl.x())
                    else:
                        remove.setX(gloPoint.x())
                elif self.dir == 3:
                    # right edge
                    remove.setWidth(gloPoint.x() - tl.x())
                elif self.dir == 0:
                    # top edge: clamp against the minimum height
                    if rb.y() - gloPoint.y() <= self.minimumHeight():
                        remove.setY(tl.y())
                    else:
                        remove.setY(gloPoint.y())
                elif self.dir == 1:
                    # bottom edge
                    remove.setHeight(gloPoint.y() - tl.y())
                elif self.dir == 4:
                    # top-left corner: combine the left- and top-edge rules
                    if rb.x() - gloPoint.x() <= self.minimumWidth():
                        remove.setX(tl.x())
                    else:
                        remove.setX(gloPoint.x())
                    if rb.y() - gloPoint.y() <= self.minimumHeight():
                        remove.setY(tl.y())
                    else:
                        remove.setY(gloPoint.y())
                elif self.dir == 7:
                    # top-right corner
                    remove.setWidth(gloPoint.x() - tl.x())
                    remove.setY(gloPoint.y())
                elif self.dir == 5:
                    # bottom-left corner
                    remove.setX(gloPoint.x())
                    remove.setHeight(gloPoint.y() - tl.y())
                elif self.dir == 6:
                    # bottom-right corner
                    remove.setWidth(gloPoint.x() - tl.x())
                    remove.setHeight(gloPoint.y() - tl.y())
                else:
                    pass
                self.setGeometry(remove)
            # move the whole window, keeping the press-time offset
            else:
                self.move(event.globalPos() - self.dragPosition)
                event.accept()

    @staticmethod
    def show_image(img_src, label):
        """Render a BGR image (numpy HxWxC array) into *label*, scaled to fit
        while preserving the aspect ratio. Errors are printed, not raised."""
        try:
            src_h, src_w, _ = img_src.shape
            box_w = label.geometry().width()
            box_h = label.geometry().height()
            # scale by whichever dimension is the tighter fit
            if src_w / box_w > src_h / box_h:
                scale = box_w / src_w
                new_w, new_h = box_w, int(scale * src_h)
            else:
                scale = box_h / src_h
                new_w, new_h = int(scale * src_w), box_h
            # cv2.resize keeps the image sharp at the new size
            resized = cv2.resize(img_src, (new_w, new_h))
            # OpenCV stores BGR; Qt expects RGB
            rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
            qimg = QImage(rgb.data, rgb.shape[1], rgb.shape[0],
                          rgb.shape[2] * rgb.shape[1], QImage.Format_RGB888)
            label.setPixmap(QPixmap.fromImage(qimg))
        except Exception as e:
            print(repr(e))

    # show text data and save
    # show text data and save
    def show_statistic(self, statistic_dic, cnt):
        """Display per-class detection counts and append them to the exports.

        statistic_dic maps class name -> count; cnt is the 1-based excel row
        for this frame. Writes to ./excel/result_excel.xlsx and
        ./labels/labels.txt, then clears the thread's running statistics.
        Errors are printed, not raised, so detection keeps running.
        """
        try:
            # openpyxl operates on the .xlsx file
            workbook = openpyxl.load_workbook('./excel/result_excel.xlsx')
            self.resultWidget.clear()
            # sort classes by count (descending) and drop zero-count entries
            stats = [item for item in
                     sorted(statistic_dic.items(), key=lambda kv: kv[1], reverse=True)
                     if item[1] > 0]
            sheet = workbook.active
            col = sheet.max_column
            for name, count in stats:
                # write into the existing column for this class, or append one
                found = False
                for header_cell in sheet['1']:
                    if header_cell.value == str(name):
                        found = True
                        sheet[header_cell.column_letter + str(cnt)] = count
                if not found:
                    col += 1
                    sheet[get_column_letter(col) + '1'] = str(name)
                    sheet[get_column_letter(col) + str(cnt)] = count
            # column A holds the frame index
            sheet['A' + str(cnt)] = cnt - 1
            workbook.save('./excel/result_excel.xlsx')
            # show text data in the result list widget
            results = [' ' + str(name) + '：' + str(count) for name, count in stats]
            self.resultWidget.addItems(results)
            # append to .txt; with-block closes the handle even if write fails
            with open('./labels/labels.txt', "a") as f:
                f.write(str(cnt - 1) + ':' + str(results) + '\n')
            # prepare for the next frame's data
            self.det_thread.statistic_dic.clear()
        except Exception as e:
            print(repr(e))

    def closeEvent(self, event):
        """Persist settings, clean export folders, and terminate the process."""
        # ask the detect thread to bail out of its loop
        self.det_thread.jump_out = True
        MessageBox(
            self.closeButton, title='Tips', text='Closing the program', time=2000, auto=True).exec_()
        config_file = 'config/setting.json'
        # save the tunable parameters so the next launch restores them
        config = dict()
        config['iou'] = self.iouSpinBox.value()
        config['conf'] = self.confSpinBox.value()
        config['rate'] = self.rateSpinBox.value()
        config['check'] = self.checkBox.checkState()
        config_json = json.dumps(config, ensure_ascii=False, indent=2)
        with open(config_file, 'w', encoding='utf-8') as f:
            f.write(config_json)
        # clear export folders; ignore_errors/exist_ok prevent a crash if a
        # folder was already removed or created by another code path
        for folder in ('./result', './labels'):
            shutil.rmtree(folder, ignore_errors=True)
            os.makedirs(folder, exist_ok=True)
        sys.exit(0)

    def resetOutFile(self):
        """Reset all export artifacts (folders, counter, excel sheet) for a
        fresh detection run."""
        # recreate output folders; ignore_errors/exist_ok prevent a crash when
        # a folder is already missing (e.g. first run) or already present
        for folder in ('./result', './labels'):
            shutil.rmtree(folder, ignore_errors=True)
            os.makedirs(folder, exist_ok=True)
        # restart the excel row counter used by the detect thread
        self.det_thread.dataCnt = 1
        # wipe the excel result sheet
        workbook = openpyxl.load_workbook('./excel/result_excel.xlsx')
        sheet = workbook.active
        sheet.delete_rows(1, sheet.max_row)
        sheet.delete_cols(1, sheet.max_column)
        workbook.save('./excel/result_excel.xlsx')

    def show_popup_dialog1(self):
        """Show the 'run' tutorial bubble anchored above the run button."""
        anchor = self.runButton.mapToGlobal(QPoint(0, 0))
        self.popup_dialog = PopupDialog(self, 1)
        self.popup_dialog.move(anchor.x() - 38, anchor.y() - 137)
        self.popup_dialog.show()

    def show_popup_dialog2(self):
        """Show the 'parameters' tutorial bubble next to the model combo box."""
        anchor = self.comboBox.mapToGlobal(QPoint(0, 0))
        self.popup_dialog = PopupDialog(self, 2)
        self.popup_dialog.move(anchor.x() - 300, anchor.y())
        self.popup_dialog.show()

    def show_popup_dialog3(self):
        """Show the 'input' tutorial bubble next to the input label."""
        anchor = self.inputlabel.mapToGlobal(QPoint(0, 0))
        self.popup_dialog = PopupDialog(self, 3)
        self.popup_dialog.move(anchor.x() + 100, anchor.y())
        self.popup_dialog.show()

    def show_popup_dialog4(self):
        """Show the 'output' tutorial bubble next to the output label."""
        anchor = self.outputlabel.mapToGlobal(QPoint(0, 0))
        self.popup_dialog = PopupDialog(self, 4)
        self.popup_dialog.move(anchor.x() + 100, anchor.y())
        self.popup_dialog.show()

class PopupDialog(QDialog):
    """Frameless, translucent tutorial bubble anchored near a UI element.

    ``type`` selects the variant: 1 = run hint (taller, 'dia.png'),
    2 = parameter hint ('dia2.png'), 3 = input hint, anything else =
    output hint (both on 'dia3.png').
    """

    def __init__(self, parent=None, type=None):
        # NOTE: parameter named `type` shadows the builtin; the name is kept
        # for backward compatibility with existing callers.
        super().__init__(parent, Qt.FramelessWindowHint)
        self.setWindowFlag(Qt.WindowStaysOnTopHint)
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.init_ui(type)

    def init_ui(self, type):
        """Build the bubble: background image, hint label, and OK button."""
        self.setStyleSheet("""
                            QLabel {
                                color:rgb(78,161,239);
                                font-size:30px;
                                font-family:Consolas;
                            }
                            QPushButton{    
                                text-align: center;
                                font-size: 20px;
                                padding:5px;
                                color: rgb(206, 209, 212);
                                border: 2px solid rgb(206, 209, 212);
                                border-radius: 17px
                            }
                            QPushButton::hover{
                                border-color:rgb(78,161,239);
                                color:rgb(78,161,239);
                            }
                            """)

        # variant 1 is slightly taller; compute the height once instead of
        # setting 150 and then conditionally overriding it (original code)
        height = 150 if type == 1 else 130
        self.background = QLabel(self)
        self.background.setGeometry(0, 0, 200, height)
        # load the background image for this variant
        if type == 1:
            pixmap = QPixmap('dia.png')
        elif type == 2:
            pixmap = QPixmap('dia2.png')
        else:
            pixmap = QPixmap('dia3.png')
        self.background.setPixmap(pixmap)
        self.background.setScaledContents(True)

        self.setWindowTitle('弹出窗口')
        self.setFixedSize(200, height)
        # hint text per variant (Chinese UI strings preserved verbatim)
        if type == 1:
            label = QLabel('点这里运行', self)
        elif type == 2:
            label = QLabel('这里调参数', self)
        elif type == 3:
            label = QLabel('这里选输入', self)
        else:
            label = QLabel('这里可输出', self)
        label.move(25, 30)
        self.ok_button = QPushButton('确定', self)
        self.ok_button.setGeometry(110, 70, 60, 40)
        # clicking OK dismisses the bubble
        self.ok_button.clicked.connect(self.close)

if __name__ == "__main__":
    app = QApplication(sys.argv)
    myWin = MainWindow()
    myWin.show()
    sys.exit(app.exec_())
