"""
brief :Editor cjh
"""
import argparse
import math
import time

import sys
import cv2
import matplotlib

matplotlib.use("Qt5Agg")  # declare the Qt5 backend for matplotlib

import numpy as np
import serial
from Map_Reflect_utils import Realsense
from utils_track import vino as ov
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from utils_PYQT.uiPython.TrackerTime import Ui_TrackerTime
from utils_PYQT.uiPython.mainWindow_track import Ui_mainwindow

import logging
import serial.tools.list_ports as ser_list
import multiprocessing
from utils_track import ball_track_vino
# from utils_track import hsv_balltrack
import time

import signal
import os

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')

pi = math.pi
_translate = QtCore.QCoreApplication.translate
redcode = 0
bluecode = 1


def p1_start():
    """Spawn the front-camera ball-tracking worker process.

    Creates a Manager dict shared with the child process and stores both
    the dict and the Process handle in module globals so the UI pages
    (Restart/Close) can reach them later.
    """
    global t_1, share_data
    manager = multiprocessing.Manager()
    share_data = manager.dict()
    # Build the tracker for the currently selected piece colour and run
    # its init loop in a brand-new process (talks to the STM32 side).
    tracker = ball_track_vino.HSVTrackball(code)
    t_1 = multiprocessing.Process(target=tracker.track_init, args=(share_data,))
    t_1.start()


def video_record(width, height, FPS, name):
    """Create an XVID .avi writer stamped with today's date.

    Returns a cv2.VideoWriter targeting source_video/<name><YYYY-MM-DD>.avi
    at the given resolution and frame rate.
    """
    codec = cv2.VideoWriter_fourcc(*'XVID')
    date_tag = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    path = f"source_video/{name}{date_tag}.avi"
    return cv2.VideoWriter(path, codec, FPS, (width, height))


def ser_open(port0="/dev/ttyUSB1"):
    ser2 = serial.Serial()
    ser2.baudrate = 115200  # 波特率
    ser2.port = port0
    ser2.timeout = None  # 设置无限长串口等待时间，避免关机串口超时
    if ser2.isOpen():
        print("串口打开成功")
    else:
        ser2.open()
    return ser2.isOpen(), ser2


def open_ser():
    """Probe every enumerated serial port and return the first idle one.

    Opens each port, clears its input buffer, waits briefly, and keeps
    the first port that shows no unsolicited incoming bytes.  Ports that
    are rejected are closed again instead of being leaked.

    Returns:
        (ret, ser2) for the selected port, or None (implicitly) when no
        port qualifies — callers unpacking the result should beware.
    """
    for i in ser_list.comports():
        print(i[0])
        ret, ser2 = ser_open(f"{i[0]}")
        ser2.reset_input_buffer()
        time.sleep(0.1)  # give the port a moment to settle after opening
        print(ser2.in_waiting)
        if not ser2.in_waiting:  # keep the first quiet port
            return ret, ser2
        ser2.close()  # fix: release ports we decided not to use


def send_msg(jd):
    """Encode *jd* as GBK text and write it to the global serial port `ser2`.

    Best-effort: any failure (encode or write) is printed instead of
    propagating, so a flaky serial link does not kill the UI timer loop.
    """
    try:
        ser2.write(str(jd).encode("gbk"))
    except Exception as exc:
        print("发送异常", exc)


def send_data_change(data: float) -> str:
    """Encode a numeric value as a 5-character protocol field: sign + 4 digits.

    The value is rounded to the nearest integer; 'P' marks positive,
    'N' marks zero or negative.  Magnitudes of 10000 or more are treated
    upstream as "no valid reading" (infinity) and clamped to the
    sentinel "0000".

    The original implementation hand-built the zero padding through an
    if/elif ladder whose final ``else`` branch was unreachable
    (``abs(...) >= 0`` always holds); this version uses a format spec
    and drops the dead branch.

    >>> send_data_change(123)
    'P0123'
    """
    value = round(data)
    sign = "P" if value > 0 else "N"
    magnitude = abs(value)
    # >= 10000 means "unmeasurable" upstream, so clamp to the 0000 sentinel.
    if magnitude >= 10000:
        return sign + "0000"
    return sign + f"{magnitude:04d}"


def data_solve(best_coord, dist_):
    """Send the nearest ball's position over serial in the 'kk' frame.

    Parameters:
        best_coord: 3-D coordinate of the nearest ball (array-like, metres).
        dist_: shortest distance to that ball (float).

    The trailing flag is "F" when dist_ equals the 100000 sentinel
    (nothing detected), otherwise "T".
    """
    valid_flag = "F" if dist_ == 100000 else "T"
    fields = [
        "kk",
        send_data_change(best_coord[0] * 1000),  # metres -> millimetres
        send_data_change(best_coord[1] * 1000),
        send_data_change(dist_),
        valid_flag,
    ]
    send_msg("".join(fields))


def data_solve_hsv(best_coord):
    """Send the HSV tracker's ball position over serial in the 'cc' frame.

    Appends "TT" when best_coord carries a third element above 250000
    (the ball is judged grabbed, via the height/depth reading),
    otherwise "FF".  The original wrapped the condition in a redundant
    ``True if ... else False``; the predicate is used directly here.
    """
    grabbed = len(best_coord) == 3 and best_coord[2] > 250000
    str_send = (
        str('cc')
        + send_data_change(best_coord[0])
        + send_data_change(best_coord[1])
        + ("TT" if grabbed else "FF")
    )
    send_msg(str_send)


# A controller object that drives page-to-page navigation.
class Main_Controller:
    def __init__(self):
        self._mainWindow = Main_Window()
        self.Track = Track_Window()
        # Fix: connect each navigation signal exactly once.  The original
        # code reconnected inside mainWindow()/TrackTime_to(), so every
        # round-trip stacked another duplicate slot invocation.
        self._mainWindow.switch_window_main.connect(self.TrackTime_to)
        self.Track.switch_window_bucket.connect(self.mainWindow)

    # Show the main (start) window.
    def mainWindow(self):
        self._mainWindow.show()

    # Jump to the tracking window and start its frame timer.
    def TrackTime_to(self):
        self.Track.show()
        # The piece colour was already stored globally by Main_Window.
        self.Track.track_init()


class Main_Window(QWidget, Ui_mainwindow):
    """Start page: pick the piece colour, then launch recording and tracking."""

    switch_window_main = QtCore.pyqtSignal()  # navigation signal

    def __init__(self):
        super(Main_Window, self).__init__()
        self.setupUi(self)
        self.retranslateUi(self)
        self.pushButton.clicked.connect(self.go_to_Bucket)
        self.pushButton.clicked.connect(self.close)
        self.pushButton_2.clicked.connect(self.close)
        # The selected piece colour is read from the radio buttons later.

    def go_to_Bucket(self):
        """Record the chosen colour, start recorders and trackers, navigate on.

        Order matters: `code` must be set before p1_start() spawns the
        HSV-tracking process, and track_init() opens camera + serial.
        """
        # On every switch, detect which piece colour is selected.
        global code, track_time, out_track, out_hsv
        code = bluecode if self.radioButton_2.isChecked() else redcode
        out_track = video_record(848, 480, FPS=30, name="depth")
        out_hsv = video_record(1280, 720, FPS=30, name="hsv")
        # Instantiate the depth-camera tracker.
        track_time = track()
        # Start the front-camera HSV tracking process.
        p1_start()
        track_time.track_init(code)
        self.switch_window_main.emit()


class track(object):
    """Depth-camera ball tracker.

    Runs OpenVINO detection on RealSense colour frames, projects each
    detection into the vehicle coordinate frame, and reports the nearest
    ball over the serial link via data_solve().
    """

    def __init__(self):
        super(track, self).__init__()
        # Both are populated later by track_init(); None until then.
        self.model = None
        self.cap = None

    # Ranking distance for a detection: Euclidean over (x, y) relative to
    # `center`, with the (pre-scaled) horizontal pixel offset as a third term.
    def calc(self, x, y, center, mid_x, shape_x):
        return math.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2 + (mid_x - shape_x) ** 2)

    # Mask out the purple ball so it does not disturb detection.
    def purple_block(self, image):
        image_copy = image.copy()
        # Brightness/contrast tweak, blur, then HSV threshold for purple.
        image_bc = cv2.convertScaleAbs(image_copy, alpha=0.54, beta=95)
        image_gauss = cv2.GaussianBlur(image_bc, (7, 7), 5)
        image_hsv = cv2.cvtColor(image_gauss, cv2.COLOR_BGR2HSV)
        lower = np.array([155, 28, 0])
        upper = np.array([174, 133, 255])
        mask_purple = cv2.inRange(image_hsv, lower, upper)
        opening = cv2.morphologyEx(mask_purple, cv2.MORPH_OPEN, kernel=np.ones((9, 9), np.uint8), iterations=5)
        # NOTE(review): bitwise_not with dst=image *inverts* (not blanks)
        # the masked pixels and mutates `image` in place — confirm intended.
        image_mask_purple = cv2.bitwise_not(image, image, mask=opening)
        return image_mask_purple.astype(np.uint8)

    def camera_to_world(self, xc, yc, zc):
        # Convert a camera-frame point (xc, yc, zc) into the world/vehicle
        # frame using the fixed mounting extrinsics below.
        RT = self.pose_robot(0, 130, 0, 0.0, 0.170, 0.38)
        trans = np.array([xc, yc, zc, 1]).T
        x_tran, y_tran, z_tran, _ = (RT @ trans)  # homogeneous transform
        return x_tran, y_tran, z_tran

    # Rotation matrix from roll/pitch/yaw angles (radians): R = Rz @ Ry @ Rx.
    def myRPY2R_robot(self, x, y, z):
        Rx = np.array([[1, 0, 0], [0, math.cos(x), -math.sin(x)], [0, math.sin(x), math.cos(x)]])
        Ry = np.array([[math.cos(y), 0, math.sin(y)], [0, 1, 0], [-math.sin(y), 0, math.cos(y)]])
        Rz = np.array([[math.cos(z), -math.sin(z), 0], [math.sin(z), math.cos(z), 0], [0, 0, 1]])
        R = Rz @ Ry @ Rx
        return R

    # Build the 4x4 homogeneous transform from Euler angles (degrees)
    # and a translation (Tx, Ty, Tz).
    def pose_robot(self, x, y, z, Tx, Ty, Tz):
        thetaX = x / 180 * pi
        thetaY = y / 180 * pi
        thetaZ = z / 180 * pi
        R = self.myRPY2R_robot(thetaX, thetaY, thetaZ)
        t = np.array([[Tx], [Ty], [Tz]])
        RT1 = np.column_stack([R, t])  # append translation column
        RT1 = np.row_stack((RT1, np.array([0, 0, 0, 1])))  # bottom row [0 0 0 1]
        return RT1

    def run_track(self):
        """Process one frame: detect balls, pick the nearest, report it.

        Returns:
            frame: annotated colour frame.
            coord_3d: [x, y, z] of the nearest ball (zeros if none found).
            coord: pixel centre of the nearest ball (bottom-centre if none).
            frame_origin: the unannotated original frame.
        """
        start = time.time()
        # 100000 is the "nothing found" sentinel distance.
        min_dist = 100000
        min_dist_out = 100000
        frame, image_depth, depth_intrin, aligned_depth_frame = realsense_cap.cam_run(self.cap)
        frame_origin = frame.copy()
        frame = self.purple_block(frame)
        frame, det = self.model.run(frame)
        # Fallback target when nothing is detected.
        coord = (int(frame.shape[1] / 2), frame.shape[0])
        coord_3d = [0, 0, 0]
        if len(det):
            for xyxy, conf, cls in reversed(det):  # box pixels, confidence, class id
                mid_pos = [int((xyxy[0] + xyxy[2]) / 2),
                           int((xyxy[1] + xyxy[3]) / 2)]
                cv2.circle(frame, mid_pos, 2, (0, 255, 0), 2)
                min_val = min(abs(int(xyxy[2]) - int(xyxy[0])), abs(int(xyxy[3]) - int(xyxy[1])))
                # Sample depth around the box centre for a robust reading.

                z, y, x = realsense_cap.depth_to_data(depth_intrin, aligned_depth_frame, mid_pos,
                                                      min_val)
                # Transform into the vehicle frame; an all-zero reading means
                # no valid depth, so treat the point as infinitely far away.
                x, y, z = self.camera_to_world(x, y, z) if (x + y + z) != 0 else (np.inf, np.inf, np.inf)
                x, y, z = round(x, 3), round(y, 3), round(z, 3)

                cv2.putText(frame, f"{x, y, z}", mid_pos, cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)

                # Rank this detection against the best seen so far.
                dist_last1 = self.calc(round(x, 1), round(y, 1), (0, 0), mid_pos[0] / 1e5, 220 / 1e5)
                dist_last2 = self.calc(x, y, (0, 0), 0, 0)
                if min_dist > dist_last1:
                    min_dist = dist_last1
                    min_dist_out = dist_last2
                    coord = mid_pos
                    x, y, z = round(x, 2), round(y, 2), round(z, 2)
                    coord_3d = [x, y, z]
        data_solve(coord_3d, round(min_dist_out * 100, 2))
        end = time.time()
        # NOTE(review): divides by zero if a frame completes in under the
        # clock resolution — confirm this cannot happen in practice.
        fps = 1 / (end - start)
        cv2.line(frame, (int(frame.shape[1] / 2), frame.shape[0]), coord, (255, 0, 0), 3)
        cv2.putText(frame, f"{round(fps, 1)}", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
        return frame, coord_3d, coord, frame_origin

    # Build the detector's configuration; called from track_init().
    def parse_opt(self, code=0):
        parser = argparse.ArgumentParser()
        parser.add_argument('--model_path', type=str,
                            default="/home/nuc2/PycharmProjects/yolov5-master/weights/best_track.xml",
                            help='model path')
        parser.add_argument('--weights_path', nargs='+', type=str,
                            default="/home/nuc2/PycharmProjects/yolov5-master/weights/best_track.bin",
                            help='weights path or triton URL')
        parser.add_argument('--conf-thres', type=float, default=0.6, help='confidence threshold')
        parser.add_argument('--line-thickness', default=1, type=int, help='bounding box thickness (pixels)')
        parser.add_argument('--iou-thres', type=float, default=0.55, help='NMS IoU threshold')
        # NOTE(review): type=list makes argparse split a command-line value
        # into characters; only the default [code] is used in practice.
        parser.add_argument('--classes', type=list, default=[code], help='Classes')
        parser.add_argument('--img-size', type=int, default=736, help='img-size')
        parser.add_argument('--device', default='GPU', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
        parser.add_argument('--name-porcess', default='BucketTime', help='name to processs--cjh')
        opt = parser.parse_args()

        return opt

    # Start-up: load the model, open the RealSense camera and the serial port.
    def track_init(self, code):
        # Globals so the serial port and camera are reachable module-wide.
        global ser2, realsense_cap
        opt = self.parse_opt(code)
        self.model = ov.Vino(**vars(opt))
        realsense_cap = Realsense.realsense(enable_auto_expose=True, enable_auto_white_blance=False,
                                            device_id="215122251231")
        self.cap = realsense_cap.cam_init(640)
        # NOTE(review): open_ser() may return None when no port qualifies,
        # in which case this unpack raises TypeError — confirm acceptable.
        ser_flag, ser2 = open_ser()


class Track_Window(QWidget, Ui_TrackerTime):
    """Tracking page: shows the depth-camera and HSV-camera views, drives
    the per-frame tracking loop from a QTimer, and handles recording."""

    switch_window_bucket = QtCore.pyqtSignal()  # navigation signal

    def __init__(self):
        super(Track_Window, self).__init__()
        self.setupUi(self)
        self.retranslateUi(self)
        self.recording_counter = 10000
        self.recording_flag = False
        # Frame timer: every tick runs one tracking iteration.
        self.timer_camera = QtCore.QTimer()
        self.timer_camera.timeout.connect(self.show_video)
        self.logger = logging.getLogger("TrackTime")
        # Button wiring — stop: shut down and return to the main page.
        self.pushButton_4.clicked.connect(self.Close)
        self.pushButton_4.clicked.connect(self.close)

    def track_init(self):
        """Start the frame timer (1 ms interval, i.e. as fast as possible)."""
        self.timer_camera.start(1)

    def Restart(self):
        """Kill the tracker process, stop timer and camera, navigate back."""
        os.kill(t_1.pid, signal.SIGTERM)
        self.timer_camera.stop()
        try:
            realsense_cap.pipeline.stop()
            print("Successfully Stop cam-pipe")
        except Exception:  # fix: bare except also caught SystemExit/KeyboardInterrupt
            print("No this Process")
        self.logger.warning("Restart")
        self.switch_window_bucket.emit()

    def Close(self):
        """Kill the tracker process, stop the video stream, navigate back."""
        os.kill(t_1.pid, signal.SIGTERM)
        self.timer_camera.stop()
        try:
            # Stop the camera pipeline.
            realsense_cap.pipeline.stop()
            print("Successfully Stop cam-pipe")
        except Exception:  # fix: narrow the bare except
            print("No this Process")
        self.logger.warning("Close and Return main page")
        self.switch_window_bucket.emit()

    def show_video(self):
        """One timer tick: run tracking, pull the HSV process's frame and
        coordinate from the shared dict, handle recording, and display."""
        frame, coord_3d, mid_pos, frame_origin = track_time.run_track()
        # White fallback image shown while the HSV camera frame is absent.
        # Fix: (rows, cols, 3) — the original swapped height and width.
        im0_color = np.ones((frame.shape[0], frame.shape[1], 3), dtype=np.uint8) * 255
        # Fix: .get() so a not-yet-published key cannot crash the timer slot
        # with KeyError each tick; (0, 0) encodes "no target" downstream.
        coord_data = share_data.get("coord_data", (0, 0))
        if share_data:
            try:
                im0_color = share_data['im0']
            except Exception as es:
                self.logger.info(f"{es}")
        data_solve_hsv(coord_data)
        if self.pushButton_2.isChecked():
            out_hsv.write(im0_color)
            out_track.write(frame_origin)
            # Blink the "Start Recording" banner while recording.
            if not self.recording_counter % 5:
                self.recording_flag = True if not self.recording_flag else False

            if self.recording_counter > 0 and self.recording_flag:
                cv2.putText(im0_color, "Start Recording", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 1,
                            (0, 0, 255),
                            2)
                cv2.putText(frame, f"Start Recording", (100, 100), cv2.FONT_HERSHEY_COMPLEX, 1,
                            (0, 0, 255),
                            2)
            self.recording_counter -= 1 if self.recording_counter > 0 else 0
        else:
            self.recording_counter = 50  # re-arm the banner countdown

        self.show_cv_img(frame, coord_data, coord_3d, mid_pos, im0_color)

    def show_cv_img(self, frame, coord_data, coord_3d, mid_pos, im0_color):
        """Convert both BGR frames to QPixmaps and refresh the UI labels."""
        shrink_im0 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        shrink_reflect = cv2.cvtColor(im0_color, cv2.COLOR_BGR2RGB)
        QtImg_im0 = QtGui.QImage(shrink_im0.data,
                                 shrink_im0.shape[1],
                                 shrink_im0.shape[0],
                                 shrink_im0.shape[1] * 3,
                                 QtGui.QImage.Format_RGB888)
        QtImg_color_track = QtGui.QImage(shrink_reflect.data,
                                         shrink_reflect.shape[1],
                                         shrink_reflect.shape[0],
                                         shrink_reflect.shape[1] * 3,
                                         QtGui.QImage.Format_RGB888)
        jpg_im0_out = QtGui.QPixmap(QtImg_im0).scaled(
            self.label_3.width(), self.label_3.height())
        jpg_Reflect_out = QtGui.QPixmap(QtImg_color_track).scaled(
            self.label_3.width(), self.label_3.height())
        self.label_3.setPixmap(jpg_im0_out)
        self.label_4.setPixmap(jpg_Reflect_out)
        self.label.setText(_translate("TrackerTime", f"mid_pos:{mid_pos}"))
        self.label_5.setText(_translate("TrackerTime", f"{'GOT' if (len(coord_data) == 3 and  coord_data[2]>250000) else 'NO' }"))
        self.label_2.setText(_translate("TrackerTime", f"coord_3d:{coord_3d}"))


def main():
    """Create the Qt application, show the start page, run the event loop."""
    qt_app = QtWidgets.QApplication(sys.argv)
    controller = Main_Controller()  # page-navigation controller
    controller.mainWindow()  # the start page is shown first
    sys.exit(qt_app.exec_())


if __name__ == "__main__":
    main()
    # track.main()
