import os
import os.path as osp
import random
import sys
import qdarkstyle
from layout import Ui_MainWindow
from PyQt5 import QtGui
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import numpy as np
import cv2
from threading import Thread
from UAV_path import Ui_Form
from mmdet.apis import init_detector, inference_detector
import mmcv
import time
import imgviz
from Config import Detector_para, class_name_to_id, Cls_Conf_Thre_para, create_shanghai_label_colormap
import matplotlib

matplotlib.use('Qt5Agg')  # select the Qt backend before any pyplot figure is created
# Global stop flag shared between the GUI thread and the capture worker threads.
exit_signal = False


class UAV_Path(QWidget, Ui_Form):
    """Dialog for entering a push-stream (e.g. RTMP) address.

    The OK button closes the dialog and broadcasts the typed address
    through ``path_info_signal``.
    """

    # Last address entered by the user.
    path = ''
    # Emitted with the stream address when the user confirms.
    path_info_signal = pyqtSignal(str)

    def __init__(self):
        super(UAV_Path, self).__init__()
        self.setupUi(self)
        # Connection order matters: the dialog closes first, then the
        # address is emitted to listeners.
        for slot in (self.close, self.emit_path):
            self.pushButton.clicked.connect(slot)

    def emit_path(self):
        """Read the text box and broadcast its content."""
        entered = self.textEdit.toPlainText()
        self.path = entered
        self.path_info_signal.emit(entered)


class MyMainWindow(QMainWindow, Ui_MainWindow):
    """Main application window.

    Hosts the video / image / stream source selectors and the start and
    stop controls, and renders detection results (delivered via
    ``img_signal.show_image``) into ``self.label``.
    """
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.initUI()
        self.cap = cv2.VideoCapture()  # shared capture handle for video/stream sources
        self.detector = Detecor()      # segmentation model wrapper
        self.uav_path = UAV_Path()     # dialog for entering a push-stream address
        # Menu actions.
        self.exit.triggered.connect(self.close)
        self.openvideo.triggered.connect(self.select_video_image)
        self.openDJ.triggered.connect(self.select_DJ_video)
        self.openimg.triggered.connect(self.select_image)
        self.opendir.triggered.connect(self.select_dir)
        # Buttons (quit also stops the worker thread first).
        self.pushButton_8.clicked.connect(self.close)
        self.pushButton_8.clicked.connect(self.close_thread)
        self.pushButton_3.clicked.connect(self.select_video_image)
        self.pushButton_6.clicked.connect(self.select_image)
        self.pushButton_7.clicked.connect(self.select_dir)
        self.pushButton_5.clicked.connect(self.program_start)
        self.pushButton_4.clicked.connect(self.suspend)
        self.pushButton_9.clicked.connect(self.select_DJ_video)

    def initUI(self):
        """Centre the window on the screen and set the icon and title."""
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        # Integer division: QWidget.move() requires int coordinates; '/'
        # would pass floats and raise TypeError under PyQt5.
        self.move((screen.width() - size.width()) // 2,
                  (screen.height() - size.height()) // 2)
        self.setWindowIcon(QIcon(Detector_para['icon']))
        self.setWindowTitle('智慧林业识别系统')

    def suspend(self):
        """Stop button: raise the global stop flag and detach the display slot."""
        sender = self.sender()
        print(sender.text() + '按钮被按下了')
        global exit_signal
        exit_signal = True
        self._disconnect_display()

    def close_thread(self):
        """Quit helper: stop the workers before the window closes."""
        global exit_signal
        exit_signal = True
        self._disconnect_display()

    def _disconnect_display(self):
        """Detach every display slot from the image signal.

        disconnect() raises TypeError when nothing is connected (e.g. the
        stop button pressed twice), so that case is swallowed.
        """
        try:
            img_signal.show_image.disconnect()
        except TypeError:
            pass

    def select_video_image(self):
        """Pick a local video file and open it on the shared capture."""
        self.file_name, self.file_type = QFileDialog.getOpenFileName(self, "选取视频文件", "./",
                                                                     "Video Files (*.avi *.mp4 *.MOV *.MP4)")
        print(self.file_type)
        if self.file_type.find("Video") >= 0:
            if self.file_name:
                self.cap.open(self.file_name)

    def select_DJ_video(self):
        """Show the push-stream address dialog; the path arrives via signal."""
        self.uav_path.show()
        # Disconnect first so reopening the dialog does not stack duplicate
        # connections (each of which would reopen the capture).
        try:
            self.uav_path.path_info_signal.disconnect(self.get_path)
        except TypeError:
            pass
        self.uav_path.path_info_signal.connect(self.get_path)

    def get_path(self, path):
        """Receive the stream address from the dialog and open it."""
        self.file_name = path
        self.cap.open(self.file_name)

    def select_image(self):
        """Pick a single image file."""
        self.file_name, self.file_type = QFileDialog.getOpenFileName(self, "选取图片文件", "./",
                                                                     "Image Files (*.jpg *.png *.bmp *.tif)")

    def select_dir(self):
        """Pick a directory of images."""
        self.file_name = QFileDialog.getExistingDirectory(self, "选取文件夹", "./", )

    def program_start(self):
        """Dispatch to the right worker based on the selected source."""
        global exit_signal
        exit_signal = False
        video_path = self.file_name

        # Defined (and referenced by the connects below) BEFORE any worker
        # starts or any synchronous detection runs, so the slot can never
        # fire while the name is still unbound.
        def show_video_img(_out_image):
            image_np_RGB = cv2.cvtColor(_out_image, cv2.COLOR_BGR2RGB)
            height, width, channels = image_np_RGB.shape
            # bytesPerLine must be given explicitly: without it QImage
            # assumes 32-bit-aligned scanlines and skews any frame whose
            # width * 3 is not a multiple of 4.
            bytes_per_line = channels * width
            q_image = QtGui.QImage(image_np_RGB.data, width, height, bytes_per_line,
                                   QtGui.QImage.Format_RGB888)
            self.label.setPixmap(QtGui.QPixmap.fromImage(q_image).scaled(self.label.width(),
                                                                         self.label.height()))
            self.update()

        # Case-insensitive suffix checks generalise the original mix of
        # upper/lower-case extensions (.MP4/.MOV/.mp4/.avi, etc.).
        lowered = video_path.lower()
        if lowered.endswith((".mp4", ".mov", ".avi")):
            capture_thread = Thread(target=capture_thread_func,
                                    kwargs={"detector": self.detector, "cap": self.cap})
            img_signal.show_image.connect(show_video_img)
            capture_thread.start()
        elif lowered.endswith((".jpg", ".png", ".bmp", ".tif")):
            # Connect BEFORE detecting: solo_detect emits show_image
            # synchronously, so connecting afterwards misses the frame.
            img_signal.show_image.connect(show_video_img)
            img = cv2.imread(video_path)
            self.detector.solo_detect(img, "1")
        elif 'rtmp' in video_path:
            capture_thread = Thread(target=capture_thread_func,
                                    kwargs={"detector": self.detector, "cap": self.cap})
            img_signal.show_image.connect(show_video_img)
            capture_thread.start()
        else:
            capture_thread_dirimg = Thread(target=capture_thread_dirimg_func,
                                           kwargs={"detector": self.detector, "video_path": video_path})
            img_signal.show_image.connect(show_video_img)
            capture_thread_dirimg.start()


def capture_thread_func(detector, cap):
    """Worker loop for a live stream or a local video file.

    Reads frames from *cap*, runs detection on a subsampled set of them
    (the float counter skips frames between detections), and stops when
    the global ``exit_signal`` is raised or the stream dries up.

    Args:
        detector: object exposing ``solo_detect(frame, frame_id)``.
        cap: an opened ``cv2.VideoCapture``-like source.
    """
    global exit_signal
    frame_clock = 0   # float accumulator deciding which frames to detect
    frame_index = 1   # running index used to name output frames
    while not exit_signal:
        ok, frame = cap.read()
        if not ok:
            # Stream yielded nothing: wait briefly, then shut this pass down.
            print("推流图像未检测到！")
            time.sleep(0.1)
            exit_signal = True
            print("等待下一次视频流!")
            continue
        frame_clock += 0.01
        if frame_clock - 0.03 <= 0:
            # Not this frame's turn yet — skip it.
            continue
        frame_clock = 0
        detector.solo_detect(frame, str(frame_index).zfill(6))
        frame_index += 1
        print("完成一帧")


def capture_thread_dirimg_func(detector, video_path):
    """Worker loop that runs detection over every image in a directory.

    Repeats over the directory until the global ``exit_signal`` is raised
    (same contract as the original). Fixes over the original: the stop
    flag is now honoured between individual images rather than only
    between full passes, unreadable files are skipped instead of crashing
    the detector, and the listing is sorted so output numbering is
    deterministic.

    Args:
        detector: object exposing ``solo_detect(img, frame_id)``.
        video_path: directory containing the images to process.
    """
    global exit_signal
    while not exit_signal:
        # Sorted for a stable processing order / output numbering.
        for num, fname in enumerate(sorted(os.listdir(video_path))):
            # Honour the stop button mid-pass, not only between passes.
            if exit_signal:
                break
            img = cv2.imread(osp.join(video_path, fname))
            if img is None:
                # Non-image or corrupt file: skip instead of crashing.
                print("无法读取文件: {}".format(fname))
                continue
            detector.solo_detect(img, str(num).zfill(6))
            print("完成一帧")


class _Signals(QObject):
    """Qt signal hub used to push detection results to the GUI.

    ``show_image`` carries a result image (numpy array, emitted by
    ``Detecor.solo_detect``) from a worker thread to the main-thread slot
    that renders it into the window's QLabel.
    """
    show_image = pyqtSignal(object)


# Module-level signal instance shared by the worker threads and the main window.
img_signal = _Signals()


class Detecor(QObject):
    """Instance-segmentation detector for forestry imagery.

    Wraps an mmdet SOLO model: runs inference, filters detections by
    confidence, post-processes masks (connected-component merge/filter),
    then saves the visualised result and emits it to the GUI through
    ``img_signal.show_image``.

    Note: the class name (``Detecor``) is kept as-is because callers
    reference it.
    """
    # Class-level defaults kept for backward compatibility with any code
    # that reads them off the class; each instance gets its own lists in
    # __init__ so state no longer leaks between detector instances.
    label_info = []
    label_info_st = []

    def __init__(self):
        super().__init__()
        self.class_name_to_id = class_name_to_id
        config_file = Detector_para['config_file']
        checkpoint_file = Detector_para['checkpoint_file']
        self.score_thr = Detector_para['score_thr']
        self.model = init_detector(config_file, checkpoint_file, device='cuda:0')
        # Per-instance accumulators (previously the shared class lists
        # were mutated, a shared-mutable-state bug).
        self.label_info = []
        self.label_info_st = []

    def solo_detect(self, image, img_new_num):
        """Run one detection pass and emit the visualised result.

        Args:
            image: input BGR image (numpy array).
            img_new_num: zero-padded frame index used in output filenames.

        Returns:
            The resized input image (kept for API compatibility; the
            visualised image is delivered through ``img_signal``).
        """
        start_time = time.time()
        image = mmcv.imresize(image, (1332, 800))  # model input size
        result = inference_detector(self.model, image)
        end_time = time.time()
        # Clamp to a tiny positive value so the FPS print cannot divide by zero.
        t = max(end_time - start_time, 1e-6)
        print("图片:{} 检测用时: {}秒, FPS={}".format(img_new_num, round(t, 2), round(1 / t, 1)))

        img_show = image.copy()
        h, w, _ = image.shape

        # Nothing detected: save the plain frame and return early.
        if not result or result == [None]:
            cv2.imwrite(osp.join(Detector_para['save_dir'], img_new_num + '.jpg'), img_show)
            return img_show

        # Post-processing timing starts here.
        start_time_post = time.time()
        # Filter detections with the single global score threshold.
        cate_label, num_mask, seg_label, cate_score = self.filter_result_thr(result)
        # cate_label, num_mask, seg_label = self.filter_result_thr_cls(result)  # per-class thresholds

        # Per class: drop small blobs and merge touching instances via
        # connected-component analysis.
        masks = self.con_components_stats(h, w, cate_label, num_mask, seg_label)
        # masks = self.no_con_components_stats(h, w, cate_label, num_mask, seg_label, cate_score)  # skip CC processing

        colormap = create_shanghai_label_colormap()  # class -> colour mapping
        viz = self.vis_save(img_show, masks, class_name_to_id, colormap, img_new_num)
        end_time_post = time.time()
        t_p = max(end_time_post - start_time_post, 1e-6)
        print("图片:{},检测用时:{}秒,FPS={},后处理用时:{}, FPS={}".format(img_new_num, round(t, 2), round(1 / t, 1), round(t_p, 2),
                                                              round(1 / t_p, 1)))

        out_img = viz
        img_signal.show_image.emit(out_img)  # push the result to the GUI

        return img_show

    def filter_result_thr(self, result):
        """Filter raw model output with the global score threshold.

        Args:
            result: mmdet SOLO output; ``result[0]`` is
                (seg masks, class labels, scores) as tensors.

        Returns:
            Tuple (class labels, mask count, binary masks, scores) for
            detections whose score exceeds ``self.score_thr``.
        """
        cur_result = result[0]
        seg_label = cur_result[0]
        seg_label = seg_label.cpu().numpy().astype(np.uint8)  # binary 0/1 masks
        cate_label = cur_result[1]
        cate_label = cate_label.cpu().numpy()  # e.g. [0 0 1 0 0 0 1 ...]
        score = cur_result[2].cpu().numpy()

        vis_inds = score > self.score_thr
        seg_label = seg_label[vis_inds]
        num_mask = seg_label.shape[0]  # number of masks above the threshold
        cate_label = cate_label[vis_inds]
        cate_score = score[vis_inds]

        return cate_label, num_mask, seg_label, cate_score

    def filter_result_thr_cls(self, result):
        """Filter raw model output using per-class score thresholds.

        Args:
            result: mmdet SOLO output; ``result[0]`` is
                (seg masks, class labels, scores) as tensors.

        Returns:
            Tuple (class labels, mask count, binary masks).
        """
        cur_result = result[0]
        seg_label = cur_result[0]
        seg_label = seg_label.cpu().numpy().astype(np.uint8)  # binary 0/1 masks
        cate_label = cur_result[1]
        cate_label = cate_label.cpu().numpy()
        score = cur_result[2].cpu().numpy()

        # Iterate backwards so deletions do not shift indices still to visit.
        for i in range(len(cate_label) - 1, -1, -1):
            cls = cate_label[i]
            scr = score[i]
            if scr < Cls_Conf_Thre_para["score_thresh"][cls]:
                seg_label = np.delete(seg_label, i, 0)
                cate_label = np.delete(cate_label, i, 0)
                score = np.delete(score, i, 0)

        num_mask = seg_label.shape[0]  # number of masks kept

        return cate_label, num_mask, seg_label

    def con_components_stats(self, h, w, cate_label, num_mask, seg_label):
        """Merge per-class masks and drop small connected components.

        For each class, all instance masks are summed into one map, then
        split back into connected components: components below the class
        minimum area are discarded, and touching instances end up merged
        into a single component.

        Args:
            h: image height.
            w: image width.
            cate_label: class index per mask.
            num_mask: number of masks.
            seg_label: stacked binary masks, shape (num_mask, h, w).

        Returns:
            dict mapping (class name, confidence) -> boolean mask.
        """
        mask_cls = []  # one accumulated map per class
        masks = {}
        class_all = [key for key in class_name_to_id.keys()][1:]  # skip background entry
        for cls in range(len(class_all)):
            mask_cls.append(np.zeros((h, w)))
            if cls in cate_label:
                for ins in range(num_mask):
                    if cate_label[ins] == cls:
                        cur_mask = seg_label[ins, :, :]
                        # Accumulate every instance of this class into one map.
                        mask_cls[cls] = mask_cls[cls] + cur_mask

                new_cur_mask = mask_cls[cls].astype(np.uint8)
                # n_components: component count (0 is background);
                # labels: per-pixel component id; stats rows hold the
                # bounding box x, y, width, height and the area.
                n_components, labels, stats, _ = cv2.connectedComponentsWithStats(new_cur_mask, connectivity=4)

                # Display confidences drawn without replacement so the
                # (label, confidence) dict keys stay unique.
                conf_list = list(range(50, 99))
                for nc in range(n_components - 1):
                    area = stats[nc + 1, cv2.CC_STAT_AREA]
                    # Drop components below the per-class minimum area.
                    if area < Cls_Conf_Thre_para["min_area"][cls]:
                        continue

                    if not conf_list:
                        # Refill so random.choice never sees an empty list
                        # (possible when more than 49 components are kept).
                        conf_list = list(range(50, 99))
                    cur_mask_bool = np.where(labels == nc + 1, 1, 0).astype(bool)
                    random_conf = random.choice(conf_list)
                    conf_list.remove(random_conf)
                    instance = (class_all[cls], int(random_conf))
                    masks[instance] = cur_mask_bool
                    self.label_info.append(class_all[cls])

        # De-duplicate while preserving first-seen order.
        self.label_info_st = list(set(self.label_info))
        self.label_info_st.sort(key=self.label_info.index)
        print("当前帧检测结果: ", self.label_info_st)

        return masks

    def no_con_components_stats(self, h, w, cate_label, num_mask, seg_label, cate_score):
        """Build the mask dict without connected-component filtering.

        Args:
            h: image height.
            w: image width.
            cate_label: class index per mask.
            num_mask: number of masks.
            seg_label: stacked binary masks.
            cate_score: score per mask.

        Returns:
            dict mapping (class name, confidence percent) -> boolean mask.
        """
        masks = {}
        for idx in range(num_mask):
            idx = -(idx + 1)  # walk the stack from the back
            cur_mask = seg_label[idx, :, :]
            cur_mask = mmcv.imresize(cur_mask, (w, h))
            cur_mask = (cur_mask > 0.5).astype(np.uint8)
            if cur_mask.sum() == 0:
                continue
            # Use the builtin bool: the np.bool alias was removed in NumPy 1.24.
            cur_mask_bool = cur_mask.astype(bool)
            cur_cate = cate_label[idx]  # class index (0-based)
            cur_score = cate_score[idx]
            # Map the class index back to its name (+1 skips the background id).
            label = "".join([k for k, v in class_name_to_id.items() if v == cur_cate + 1])

            instance = (label, int(cur_score * 100))
            masks[instance] = cur_mask_bool
            self.label_info.append(label)

        # De-duplicate while preserving first-seen order.
        self.label_info_st = list(set(self.label_info))
        self.label_info_st.sort(key=self.label_info.index)
        print("当前帧检测结果: ", self.label_info_st)

        return masks

    def vis_save(self, img_show, masks, class_name_to_id, colormap, img_new_num):
        """Render the masks onto the image and save the visualisation.

        Args:
            img_show: image to draw on (BGR).
            masks: dict mapping (class name, confidence) -> boolean mask.
            class_name_to_id: class-name -> id dictionary.
            colormap: per-class colour mapping.
            img_new_num: frame index used in the output filename.

        Returns:
            The visualised image (the untouched input when masks is empty).
        """
        viz = img_show
        # Build parallel label/caption/mask sequences; captions embed the
        # confidence value.
        if masks:
            labels, captions, masks = zip(
                *[
                    (class_name_to_id[cnm], cnm + str(":") + str(score), msk)
                    for (cnm, score), msk in masks.items()
                    if cnm in class_name_to_id
                ]
            )
            viz = imgviz.instances2rgb(
                image=img_show,
                labels=labels,
                masks=masks,
                captions=captions,
                font_size=15,
                line_width=2,
                boundary_width=1,  # contour width
                alpha=0.5,  # blend ratio
                colormap=colormap,  # class -> colour mapping
            )
        # Save; create the output directory up front — imsave does not.
        out_viz_file = osp.join(Detector_para['save_dir'], "whole", img_new_num + "_out.jpg")
        os.makedirs(osp.dirname(out_viz_file), exist_ok=True)
        viz_rgb = cv2.cvtColor(viz, cv2.COLOR_BGR2RGB)
        imgviz.io.imsave(out_viz_file, viz_rgb)

        return viz


if __name__ == '__main__':
    # Build the Qt application, apply the dark theme, show the main
    # window maximised and enter the event loop.
    application = QApplication(sys.argv)
    main_window = MyMainWindow()
    application.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
    main_window.showMaximized()
    main_window.show()
    sys.exit(application.exec())
