"""
Author: smilealvin92
Time: 11/21/2019
Version: 1.0
Reference: Stephane Vujasinovic and Frederic Uhrweiller
"""
import operator
import random

import numpy as np
import os
import cv2
import networkx as nx
from mathfunc import get_distance_between_face_point as gdbf

# NOTE(review): the image resolution should probably be reduced to match the
# original project's parameters; for larger-resolution images it is unclear
# whether the filter kernel should be enlarged — confirm before tuning.
kernel = np.ones((3, 3), np.uint8)  # 3x3 uint8 morphology kernel (not referenced elsewhere in this file)


def R_verification(R_mto1, angle_x, angle_y, angle_z):  # angles in rad
    """Sanity-check a rotation matrix against its expected pitch/roll angles.

    Rotates the camera optical axis [0, 0, 1] into the M frame with
    ``R_mto1``, recovers the pitch (projection onto the XZ plane vs the
    Z axis) and roll (projection onto the YZ plane vs the Z axis), and
    compares them with the expected ``angle_y`` / ``angle_x``.

    Args:
        R_mto1: 3x3 rotation matrix taking the camera frame into M.
        angle_x: expected roll (rad).
        angle_y: expected pitch (rad).
        angle_z: expected yaw (rad) — unused by this check.

    Returns:
        True when both recovered angles are within 0.1 rad of the
        expected values, False otherwise.
    """
    print('开始检验Roll是否计算正确')
    axis_cam = np.array([0, 0, 1]).T        # optical axis in the camera frame
    axis_m = np.dot(R_mto1, axis_cam)       # optical axis expressed in M

    # Pitch: angle between the axis' projection onto M's XZ plane and M's Z axis.
    proj_xz_len = (axis_m[0] ** 2 + axis_m[2] ** 2) ** 0.5
    recovered_pitch = np.arccos(axis_m[2] / proj_xz_len)
    print('pitch: ', recovered_pitch * 180 / np.pi)

    # Roll: angle between the axis' projection onto M's YZ plane and M's Z axis.
    proj_yz_len = (axis_m[1] ** 2 + axis_m[2] ** 2) ** 0.5
    recovered_roll = np.arccos(axis_m[2] / proj_yz_len)
    print('roll: ', recovered_roll * 180 / np.pi)

    pitch_ok = abs(recovered_pitch - angle_y) < 0.1
    roll_ok = abs(recovered_roll - angle_x) < 0.1
    return bool(pitch_ok and roll_ok)


class StereoVision:
    """Stereo triangulation between two simulated pinhole cameras.

    The cameras live in a left-handed world frame (W).  All internal math runs
    in an intermediate frame (M) sharing W's origin with permuted axes:
    M.X = W.Y, M.Y = W.Z, M.Z = W.X.  Extrinsics are derived once in
    coordinate_transformation(); a target is located either via the epipolar
    cross-product constraint (get_position_by_uv) or scored by the
    closest-approach distance of the two viewing rays (get_error_by_uv).
    """

    def __init__(self, frame_w, frame_h, mouse_event,
                 target_img_dir, target_img_num,
                 f, dx, dy,
                 camera_xy_1, camera_xy_2, camera_degree_1, camera_degree_2):
        """
        Args:
            frame_w: image width in pixels.
            frame_h: image height in pixels.
            mouse_event: OpenCV mouse-event constant used for manual picking.
            target_img_dir: directory containing 'testN.jpg' images.
            target_img_num: number of target images to process.
            f: focal length (mm).
            dx: physical pixel pitch along u (mm/pixel).
            dy: physical pixel pitch along v (mm/pixel).
            camera_xy_1: camera 1 position in the world frame (W).
            camera_xy_2: camera 2 position in the world frame (W).
            camera_degree_1: W->camera-1 rotation angles (deg), applied
                counter-clockwise, extrinsically, in X-Y-Z order.
            camera_degree_2: W->camera-2 rotation angles (deg), same convention.
        """
        self.frame_w = frame_w
        self.frame_h = frame_h
        self.mouse_event = mouse_event
        self.target_img_dir = target_img_dir
        self.target_img_num = target_img_num

        # Intrinsics: focal lengths (fx, fy) and principal point (u_0, v_0).
        # Image-frame origin is the sensor centre; pixel-frame origin is the
        # top-left corner, so (u_0, v_0) is the principal point in pixels.
        # Intrinsic matrix:
        # [ fx     0      u_0
        #   0      fy     v_0
        #   0      0       1 ]
        # For the simulated camera (fx, fy) = (2828.2829, 2828.2829) and the
        # true principal point is (960, 540).  The camera is treated as an
        # ideal distortion-free model: fx == fy, u_0 = frame_w/2,
        # v_0 = frame_h/2, distortion (k_1, k_2, p_1, p_2, k_3) ignored,
        # no rectification performed.
        self.f = f
        self.dx = dx
        self.dy = dy

        self.fx = f / dx
        self.fy = f / dy
        self.u_0 = frame_w / 2
        self.v_0 = frame_h / 2

        # Camera positions in the world frame.
        self.camera_xy_1 = camera_xy_1
        self.camera_xy_2 = camera_xy_2

        # World -> camera rotation angles (degrees).
        self.camera_degree_1 = camera_degree_1
        self.camera_degree_2 = camera_degree_2

        # Last pixel picked with the mouse (set by get_pxuv_from_mouse).
        self.pxu = 0
        self.pxv = 0
        # Legacy placeholders — not read anywhere in this class; kept so the
        # public attribute set stays backward-compatible.
        self.T = [-0.5, 0, 0]
        self.R = 1

        self.R_1_to_m, self.T_1_to_m, self.R_m_to_1, self.T_m_to_1, \
        self.R_m_to_2, self.T_m_to_2, self.R_2_to_m, self.T_2_to_m = self.coordinate_transformation()

    def get_pxuv(self):
        """Interactively collect the target pixel from every test image.

        Opens each 'testN.jpg', waits for a double click (handled by
        get_pxuv_from_mouse), flips v so the origin sits at the bottom-left,
        and saves the collected columns to "pxuv_2_2.npz".
        """
        pxuv = np.zeros((self.target_img_num, 2), dtype=int)

        # Double-click each image to pick the target pixel; results are
        # stored column-wise in the .npz cache.
        tag_mouse_check = 1
        if tag_mouse_check:
            print('双击图像获取目标的像素坐标')
            for i in range(0, self.target_img_num):
                filename = str('test' + str(i) + '.jpg')
                image_to_process = cv2.imread(self.target_img_dir + filename, 1)
                cv2.namedWindow(filename, 0)
                cv2.imshow(filename, image_to_process)
                # Mouse click
                cv2.setMouseCallback(filename, self.get_pxuv_from_mouse, image_to_process)
                key = cv2.waitKey(0)
                pxuv[i, 0] = self.pxu
                # Flip v: OpenCV's origin is top-left, ours is bottom-left
                # (v_0 * 2 == frame_h).
                pxuv[i, 1] = self.v_0 * 2 - self.pxv
            np.savez("pxuv_2_2.npz", pxuv_0=pxuv[:, 0], pxuv_1=pxuv[:, 1])
            print('目标像素坐标获取完毕，已保存:' + "\n" + str(pxuv))

    def get_pxuv_from_mouse(self, event, x, y, flags, param):
        """OpenCV mouse callback: record the double-clicked pixel (u, v)."""
        # Triggered on a left double click only.
        if event == cv2.EVENT_LBUTTONDBLCLK:
            # (u, v) pixel frame: origin at the top-left corner,
            # u in [0, frame_w], v in [0, frame_h].
            self.pxu = x
            self.pxv = y
            print("Img_uv: ", "(", self.pxu, ",", self.pxv, ")")

    def compute_distance(self):
        """Triangulate the manually-picked target and print diagnostics.

        Loads the pixel coordinates cached by get_pxuv() (prompting for them
        when the cache file is missing), solves for the target position with
        two independent methods, then runs a round-trip consistency check of
        the precomputed transforms.  All results are printed.
        """
        # Pixel coordinates of the target in both cameras.
        if not os.path.exists("pxuv_2_2.npz"):
            print("请先获取目标像素坐标")
            self.get_pxuv()
        pxuv_file = np.load("pxuv_2_2.npz")
        px_u = np.array(pxuv_file['pxuv_0'])
        px_v = np.array(pxuv_file['pxuv_1'])
        pxuv = np.array((px_u, px_v)).T
        print('相机1中的目标像素坐标:' + "\n" + str(pxuv[0, :]))
        print('相机2中的目标像素坐标:' + "\n" + str(pxuv[1, :]))

        # Intrinsic matrix:
        # [ fx     0      u_0
        #   0      fy     v_0
        #   0      0       1 ]
        mtx = np.array([[self.fx, 0, self.u_0], [0, self.fy, self.v_0], [0, 0, 1]])
        print('相机内参矩阵: ' + "\n" + str(mtx))

        # Extrinsics (precomputed in __init__): M->camera translates first
        # then rotates; camera->M rotates first then translates.
        print("here")
        print(self.R_m_to_1)
        print(np.dot(self.R_m_to_1, np.array([np.sqrt(3), 1, np.sqrt(3)]).T))

        # Homogeneous pixel coordinates of the target.
        uv_px_1 = np.array([pxuv[0, 0], pxuv[0, 1], 1]).T
        uv_px_2 = np.array([pxuv[1, 0], pxuv[1, 1], 1]).T
        print('1号相机像素坐标系下的目标坐标', uv_px_1)
        print('2号相机像素坐标系下的目标坐标', uv_px_2)

        # Method 1: cross-product epipolar constraint.  Only applicable when
        # the two cameras' XZ planes are parallel; no solution otherwise.
        solvement_cross = True
        if solvement_cross:
            # 1->2 transform goes through M: R_m2 * (R_1m * X + T_1m + T_m2)
            R = np.dot(self.R_m_to_2, self.R_1_to_m)
            T = np.dot(self.R_m_to_2, self.T_1_to_m) + np.dot(self.R_m_to_2, self.T_m_to_2)

            # Back-projected rays (scale factors): lambda_i = inv(mtx) * P_i
            lambda_1 = np.dot(np.linalg.inv(mtx), uv_px_1)
            lambda_2 = np.dot(np.linalg.inv(mtx), uv_px_2)
            print('lambda_1', lambda_1)
            print('lambda_2', lambda_2)
            # Solve a_1 from: a_1 * lambda_2 x (R * lambda_1) + lambda_2 x T = 0
            R_lambda_1 = np.dot(R, lambda_1)
            lambda_2_R_lambda_1 = np.cross(lambda_2, R_lambda_1)
            lambda_2_T = np.cross(lambda_2, T)
            print('lambda_2_R_lambda_1', lambda_2_R_lambda_1)
            print('lambda_2_T', lambda_2_T)
            # Three redundant estimates of the scale a_1, one per component.
            a_1_0 = -lambda_2_T[0] / lambda_2_R_lambda_1[0]
            a_1_1 = -lambda_2_T[1] / lambda_2_R_lambda_1[1]
            a_1_2 = -lambda_2_T[2] / lambda_2_R_lambda_1[2]
            print('a_1_0', a_1_0)
            print('a_1_1', a_1_1)
            print('a_1_2', a_1_2)
            # a_2 * inv(mtx) * p_2 = a_1 * R * inv(mtx) * p_1 + T
            # a_1 * p_1 = K * p_camera_1
            xy_c_1 = a_1_1 * np.dot(np.linalg.inv(mtx), uv_px_1)
            print('1号相机坐标系下目标坐标为: ' + "\n" + str(xy_c_1))
            distance_camera_1 = pow((pow(xy_c_1[0], 2) + pow(xy_c_1[1], 2) + pow(xy_c_1[2], 2)), 0.5)
            print('目标与1号相机距离为: ' + "\n" + str(distance_camera_1))

            xy_m = np.dot(self.R_1_to_m, np.dot(self.R_m_to_1, self.T_1_to_m) + xy_c_1)
            print('世界坐标系下目标的坐标为:', [xy_m[2], xy_m[0], xy_m[1]])

        # Method 2: each camera's optical centre and the target's projection
        # onto the image plane define a ray; the two rays (nearly) meet at
        # the target.
        solvement_meetline = True
        if solvement_meetline:
            print('使用方法二求解')
            # Image-frame -> pixel-frame transform matrix.
            R_pxtoimg = np.array([[1 / self.dx, 0, self.u_0], [0, 1 / self.dy, self.v_0], [0, 0, 1]])
            # Target in the image frame (mm).
            xy_i_1 = np.dot(np.linalg.inv(R_pxtoimg), uv_px_1)
            xy_i_2 = np.dot(np.linalg.inv(R_pxtoimg), uv_px_2)
            print('1号相机图像坐标系下的目标坐标（单位：mm）', xy_i_1)
            print('2号相机图像坐标系下的目标坐标（单位：mm）', xy_i_2)
            # Projection of the target on the image plane, in each camera frame (m).
            xy_c_1 = np.array([xy_i_1[0] / 1000, xy_i_1[1] / 1000, self.f / 1000]).T
            xy_c_2 = np.array([xy_i_2[0] / 1000, xy_i_2[1] / 1000, self.f / 1000]).T
            print('1系下的目标在1系成像平面投影的坐标（单位：m）', xy_c_1)
            print('2系下的目标在2系成像平面投影的坐标（单位：m）', xy_c_2)
            # Move both projection points into the M frame.
            xy_m_1 = np.dot(self.R_1_to_m, xy_c_1) + self.T_1_to_m
            xy_m_2 = np.dot(self.R_2_to_m, xy_c_2) + self.T_2_to_m
            print('M系下的目标在1系成像平面投影的坐标', xy_m_1)
            print('M系下的目标在2系成像平面投影的坐标', xy_m_2)

            # Camera origins expressed in M.
            O_c = np.array([0, 0, 0]).T
            O_m_1 = np.dot(self.R_1_to_m, O_c) + self.T_1_to_m
            O_m_2 = np.dot(self.R_2_to_m, O_c) + self.T_2_to_m
            print('M系下的1系原点的坐标', O_m_1)
            print('M系下的2系原点的坐标', O_m_2)
            # Ray through each origin and its projection point...
            O_xy_1 = np.array([O_m_1[0] - xy_m_1[0], O_m_1[1] - xy_m_1[1], O_m_1[2] - xy_m_1[2]]).T
            O_xy_2 = np.array([O_m_2[0] - xy_m_2[0], O_m_2[1] - xy_m_2[1], O_m_2[2] - xy_m_2[2]]).T
            # ...normalised to unit direction vectors.
            O_xy_1 = O_xy_1 / np.linalg.norm(O_xy_1)
            O_xy_2 = O_xy_2 / np.linalg.norm(O_xy_2)
            # Common normal of the two rays.
            line_normal_vector = np.cross(O_xy_1, O_xy_2)
            print('两直线的公共法向量为：', line_normal_vector)
            # Vector between one point on each ray.
            O_1_O_2 = np.array([xy_m_1[0] - xy_m_2[0], xy_m_1[1] - xy_m_2[1], xy_m_1[2] - xy_m_2[2]]).T
            # Ray-to-ray distance = |projection of O_1_O_2 onto the common normal|.
            error_line = abs(np.dot(O_1_O_2, line_normal_vector)) / np.linalg.norm(line_normal_vector)
            print('两直线间的距离为：', error_line)

        # Round-trip sanity check of the rotation matrices.
        # World frame relation: M.X = W.Y, M.Y = W.Z, M.Z = W.X
        yx_m = np.array([-1.39, 0.95, -4.14]).T
        print(self.T_m_to_1, self.T_m_to_2)
        # M frame -> camera 1 / camera 2
        print("mytest")

        yx_c_1 = np.dot(self.R_m_to_1, yx_m + self.T_m_to_1)
        yx_c_2 = np.dot(self.R_m_to_2, yx_m + self.T_m_to_2)

        print(self.R_m_to_2)

        print(np.dot(self.R_m_to_2, self.T_m_to_2))
        print(np.dot(self.R_m_to_2, yx_m))
        print(yx_c_2)

        xy_m_1 = np.dot(self.R_1_to_m, yx_c_1) + self.T_1_to_m
        # FIX: this second round trip previously reused camera 1's transform
        # and point (R_1_to_m / yx_c_1 / T_1_to_m copy-paste), so the camera-2
        # check just echoed the camera-1 result.
        xy_m_2 = np.dot(self.R_2_to_m, yx_c_2) + self.T_2_to_m
        print('验证1系M系变换矩阵：', xy_m_1)
        print('验证2系M系变换矩阵：', xy_m_2)

        # Camera frames -> pixel coordinates (perspective divide by depth).
        uv_px_1 = np.dot(mtx, yx_c_1) / yx_c_1[2]
        uv_px_2 = np.dot(mtx, yx_c_2) / yx_c_2[2]
        print('目标1像素坐标：', uv_px_1)
        print('目标2像素坐标：', uv_px_2)
        # M -> W is just the inverse axis permutation: X->Y, Y->Z, Z->X.

    def coordinate_transformation(self):
        """Build the M<->camera transforms from the W-frame extrinsics.

        Both cameras are obtained from the world frame (W) by translating
        first and then rotating counter-clockwise, extrinsically, in X-Y-Z
        order; all four frames involved are left-handed.

        The intermediate frame M shares W's origin with permuted axes
        (M.X = W.Y, M.Y = W.Z, M.Z = W.X), so in M the rotation sequence
        becomes [deg[1], deg[2], deg[0]] and the extrinsic X-Y-Z rotation is
        equivalent to an extrinsic Z-X-Y rotation, which in turn equals an
        intrinsic counter-clockwise Y-X-Z rotation.

        Returns:
            (R_1tom, T_1tom, R_mto1, T_mto1,
             R_mto2, T_mto2, R_2tom, T_2tom)
        """
        # 1. M -> camera 1: translate first, then rotate intrinsically in
        #    Y-X-Z order (counter-clockwise).
        T_mto1 = np.array([-self.camera_xy_1[1], -self.camera_xy_1[2], -self.camera_xy_1[0]]).T  # translation
        roll_mto1 = self.camera_degree_1[1] * np.pi / 180  # x
        pitch_mto1 = -self.camera_degree_1[2] * np.pi / 180  # y
        yaw_mto1 = self.camera_degree_1[0] * np.pi / 180  # z
        R_mto1_x, R_mto1_y, R_mto1_z = self.compute_rotation_matrix(roll_mto1, pitch_mto1, yaw_mto1)
        R_mto1 = np.dot(np.dot(R_mto1_z, R_mto1_x), R_mto1_y)

        # 2. camera 1 -> M: the inverse — rotate intrinsically in Z-X-Y order
        #    with negated angles, then translate back.
        R_1tom_x, R_1tom_y, R_1tom_z = self.compute_rotation_matrix(-roll_mto1, -pitch_mto1, -yaw_mto1)  # negated angles
        R_1tom = np.dot(np.dot(R_1tom_y, R_1tom_x), R_1tom_z)  # reversed order
        T_1tom = -T_mto1  # reversed translation

        # 3. M -> camera 2 (same construction as step 1).
        T_mto2 = np.array([-self.camera_xy_2[1], -self.camera_xy_2[2], -self.camera_xy_2[0]]).T  # translation
        roll_mto2 = self.camera_degree_2[1] * np.pi / 180
        pitch_mto2 = -self.camera_degree_2[2] * np.pi / 180
        yaw_mto2 = self.camera_degree_2[0] * np.pi / 180
        R_mto2_x, R_mto2_y, R_mto2_z = self.compute_rotation_matrix(roll_mto2, pitch_mto2, yaw_mto2)
        R_mto2 = np.dot(np.dot(R_mto2_z, R_mto2_x), R_mto2_y)

        # 4. camera 2 -> M (inverse of step 3).
        R_2tom_x, R_2tom_y, R_2tom_z = self.compute_rotation_matrix(-roll_mto2, -pitch_mto2, -yaw_mto2)  # negated angles
        R_2tom = np.dot(np.dot(R_2tom_y, R_2tom_x), R_2tom_z)  # reversed order
        T_2tom = -T_mto2  # reversed translation

        return R_1tom, T_1tom, R_mto1, T_mto1, R_mto2, T_mto2, R_2tom, T_2tom

    def get_position_by_uv(self, p1, p2):
        """Triangulate a target from its pixel coordinates in both cameras.

        Input pixel frame has its origin at the top-left corner, u pointing
        right and v pointing down.

        Args:
            p1: [u, v] of the target in camera 1 (pixels).
            p2: [u, v] of the target in camera 2 (pixels).

        Returns:
            [x, y, z] position of the target in the world frame (W).
        """
        pos1, pos2 = p1.copy(), p2.copy()
        # Flip v to a bottom-left origin; v_0 * 2 == frame_h (previously a
        # hard-coded 1080, now generalized to any frame height).
        pos1[1] = self.v_0 * 2 - pos1[1]
        pos2[1] = self.v_0 * 2 - pos2[1]
        mtx = np.array([[self.fx, 0, self.u_0], [0, self.fy, self.v_0], [0, 0, 1]])
        uv_px_1 = np.array(np.asarray([*pos1, 1])).T
        uv_px_2 = np.array(np.asarray([*pos2, 1])).T
        solvement_cross = True
        if solvement_cross:
            # Same cross-product method as compute_distance(), minus prints.
            R = np.dot(self.R_m_to_2, self.R_1_to_m)
            T = np.dot(self.R_m_to_2, self.T_1_to_m) + np.dot(self.R_m_to_2, self.T_m_to_2)
            lambda_1 = np.dot(np.linalg.inv(mtx), uv_px_1)
            lambda_2 = np.dot(np.linalg.inv(mtx), uv_px_2)
            R_lambda_1 = np.dot(R, lambda_1)
            lambda_2_R_lambda_1 = np.cross(lambda_2, R_lambda_1)
            lambda_2_T = np.cross(lambda_2, T)
            # Only the y-component estimate of the scale factor is used.
            a_1_1 = -lambda_2_T[1] / lambda_2_R_lambda_1[1]
            xy_c_1 = a_1_1 * np.dot(np.linalg.inv(mtx), uv_px_1)
            xy_m = np.dot(self.R_1_to_m, np.dot(self.R_m_to_1, self.T_1_to_m) + xy_c_1)
            # M -> W is an axis permutation: X->Y, Y->Z, Z->X.
            return [xy_m[2], xy_m[0], xy_m[1]]

    def get_error_by_uv(self, p1, p2):
        """Distance between the two viewing rays implied by a detection pair.

        Input pixel frame has its origin at the top-left corner, u pointing
        right and v pointing down.

        Args:
            p1: [u, v] of the target in camera 1 (pixels).
            p2: [u, v] of the target in camera 2 (pixels).

        Returns:
            accuracy: perpendicular distance between the two viewing rays
            (m); 0 would mean the rays intersect exactly.
        """
        pos1, pos2 = p1.copy(), p2.copy()
        # Flip v to a bottom-left origin; v_0 * 2 == frame_h (previously a
        # hard-coded 1080, now generalized to any frame height).
        pos1[1] = self.v_0 * 2 - pos1[1]
        pos2[1] = self.v_0 * 2 - pos2[1]
        uv_px_1 = np.array(np.asarray([*pos1, 1])).T
        uv_px_2 = np.array(np.asarray([*pos2, 1])).T
        solvement_meetline = True
        if solvement_meetline:
            # Same ray-distance method as compute_distance(), minus prints.
            # Image-frame -> pixel-frame transform matrix.
            R_pxtoimg = np.array([[1 / self.dx, 0, self.u_0], [0, 1 / self.dy, self.v_0], [0, 0, 1]])
            # Target in the image frame (mm).
            xy_i_1 = np.dot(np.linalg.inv(R_pxtoimg), uv_px_1)
            xy_i_2 = np.dot(np.linalg.inv(R_pxtoimg), uv_px_2)
            # Projection of the target on the image plane, in camera frames (m).
            xy_c_1 = np.array([xy_i_1[0] / 1000, xy_i_1[1] / 1000, self.f / 1000]).T
            xy_c_2 = np.array([xy_i_2[0] / 1000, xy_i_2[1] / 1000, self.f / 1000]).T
            # Move both projection points into the M frame.
            xy_m_1 = np.dot(self.R_1_to_m, xy_c_1) + self.T_1_to_m
            xy_m_2 = np.dot(self.R_2_to_m, xy_c_2) + self.T_2_to_m
            # Camera origins in M.
            O_c = np.array([0, 0, 0]).T
            O_m_1 = np.dot(self.R_1_to_m, O_c) + self.T_1_to_m
            O_m_2 = np.dot(self.R_2_to_m, O_c) + self.T_2_to_m
            # Ray through each origin and its projection point...
            O_xy_1 = np.array([O_m_1[0] - xy_m_1[0], O_m_1[1] - xy_m_1[1], O_m_1[2] - xy_m_1[2]]).T
            O_xy_2 = np.array([O_m_2[0] - xy_m_2[0], O_m_2[1] - xy_m_2[1], O_m_2[2] - xy_m_2[2]]).T
            # ...normalised to unit direction vectors.
            O_xy_1 = O_xy_1 / np.linalg.norm(O_xy_1)
            O_xy_2 = O_xy_2 / np.linalg.norm(O_xy_2)
            # Common normal of the two rays.
            line_normal_vector = np.cross(O_xy_1, O_xy_2)
            # Vector between one point on each ray.
            O_1_O_2 = np.array([xy_m_1[0] - xy_m_2[0], xy_m_1[1] - xy_m_2[1], xy_m_1[2] - xy_m_2[2]]).T
            # Ray-to-ray distance = |projection of O_1_O_2 onto the common normal|.
            error_line = abs(np.dot(O_1_O_2, line_normal_vector)) / np.linalg.norm(line_normal_vector)
            return error_line

    @staticmethod
    def compute_rotation_matrix(roll, pitch, yaw):
        """Elementary rotation matrices (left-handed frame, CCW, intrinsic).

        Args:
            roll: rotation about the X axis (rad).
            pitch: rotation about the Y axis (rad).
            yaw: rotation about the Z axis (rad).

        Returns:
            (R_x, R_y, R_z): the three elementary rotation matrices; callers
            compose them in whichever order the rotation sequence requires.
        """
        R_x_roll = np.array([[1, 0, 0],
                             [0, np.cos(roll), -np.sin(roll)],
                             [0, np.sin(roll), np.cos(roll)]])
        R_y_pitch = np.array([[np.cos(pitch), 0, np.sin(pitch)],
                              [0, 1, 0],
                              [-np.sin(pitch), 0, np.cos(pitch)]])
        R_z_yaw = np.array([[np.cos(yaw), -np.sin(yaw), 0],
                            [np.sin(yaw), np.cos(yaw), 0],
                            [0, 0, 1]])
        return R_x_roll, R_y_pitch, R_z_yaw


def test_data():
    """Triangulate multi-camera tracking data frame by frame.

    Builds a StereoVision pairing for every ordered pair of four cameras,
    loads per-frame 2-D detections from ./data/data1..4, links detections
    across cameras whose ray-to-ray distance is below a threshold, groups
    the links into connected components (one component per physical target),
    and triangulates one position per component per frame, printing each.

    Returns:
        [xs, ys, zs]: three parallel lists of W-frame coordinates rounded
        to 3 decimals, flattened over all frames and targets.
    """
    # One StereoVision per ordered camera pair (both orders, since the
    # extrinsics are asymmetric).  World-frame positions.
    cc12 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_1=[-210.00, -0, 15.00], camera_xy_2=[-230.00, -20.00, 15.00],
                        camera_degree_1=[0, 15, 180], camera_degree_2=[0, 15, 90])
    cc21 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_2=[-210.00, -0, 15.00], camera_xy_1=[-230.00, -20.00, 15.00],
                        camera_degree_2=[0, 15, 180], camera_degree_1=[0, 15, 90])
    cc13 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_1=[-210.00, -0, 15.00], camera_xy_2=[-250.00, 0, 15.00],
                        camera_degree_1=[0, 15, 180], camera_degree_2=[0, 15, 0])
    cc31 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_2=[-210.00, -0, 15.00], camera_xy_1=[-250.00, 0, 15.00],
                        camera_degree_2=[0, 15, 180], camera_degree_1=[0, 15, 0])
    cc14 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_1=[-210.00, -0, 15.00], camera_xy_2=[-230.00, 20.00, 15.00],
                        camera_degree_1=[0, 15, 180], camera_degree_2=[0, 15, -90])
    cc41 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_2=[-210.00, -0, 15.00], camera_xy_1=[-230.00, 20.00, 15.00],
                        camera_degree_2=[0, 15, 180], camera_degree_1=[0, 15, -90])
    cc23 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_1=[-230.00, -20.00, 15.00], camera_xy_2=[-250.00, 0, 15.00],
                        camera_degree_1=[0, 15, 90], camera_degree_2=[0, 15, 0])
    cc32 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_2=[-230.00, -20.00, 15.00], camera_xy_1=[-250.00, 0, 15.00],
                        camera_degree_2=[0, 15, 90], camera_degree_1=[0, 15, 0])
    cc24 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_1=[-230.00, -20.00, 15.00], camera_xy_2=[-230.00, 20.00, 15.00],
                        camera_degree_1=[0, 15, 90], camera_degree_2=[0, 15, -90])
    cc42 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_2=[-230.00, -20.00, 15.00], camera_xy_1=[-230.00, 20.00, 15.00],
                        camera_degree_2=[0, 15, 90], camera_degree_1=[0, 15, -90])
    cc34 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_1=[-250.00, 0, 15.00], camera_xy_2=[-230.00, 20.00, 15.00],
                        camera_degree_1=[0, 15, 0], camera_degree_2=[0, 15, -90])
    cc43 = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
                        target_img_dir="./test/", target_img_num=2,
                        f=35, dx=23.76 / 1920, dy=13.365 / 1080,
                        camera_xy_2=[-250.00, 0, 15.00], camera_xy_1=[-230.00, 20.00, 15.00],
                        camera_degree_2=[0, 15, 0], camera_degree_1=[0, 15, -90])
    # cc_matrix[i][j] triangulates a (camera i+1, camera j+1) pair; the
    # diagonal is an unused placeholder.
    cc_matrix = [[0, cc12, cc13, cc14], [cc21, 0, cc23, cc24], [cc31, cc32, 0, cc34], [cc41, cc42, cc43, 0]]

    import re
    import json

    # data[c][frame] -> list of [u, v] bounding-box centres for camera c+1.
    data = [[], [], [], []]
    for i in range(1, 5):
        with open('./data/data' + str(i), 'r', encoding='utf8') as f:
            line = f.readline()
            while line:
                a = re.search(r"\[.*\n", line)
                if a:
                    data_line = a.group()[0:-1]
                    data[i - 1].append([[(x[0] + x[2]) / 2, (x[1] + x[3]) / 2] for x in json.loads(data_line)])
                line = f.readline()

    poses = [[], [], []]
    error_thred = 0.2  # rays closer than this (m) are treated as the same target
    frame_num = 299
    links = [[] for _ in range(frame_num)]
    delta = 0  # frame offset, kept for experimenting with sub-ranges
    for framei in range(0 + delta, frame_num + delta):  # every frame
        for i in range(len(data) - 1):  # first camera of the pair
            for j in range(i + 1, len(data)):  # second camera of the pair
                # All detections of cameras i+1 and j+1 in this frame.
                targetsi = data[i][framei]
                targetsj = data[j][framei]
                # Score every cross-camera detection pair; pairs whose rays
                # nearly intersect are assumed to be the same physical target.
                for indexti, ti in enumerate(targetsi):
                    for indextj, tj in enumerate(targetsj):
                        error = cc_matrix[i][j].get_error_by_uv(ti, tj)
                        if error < error_thred:
                            # Node ids encode "<camera><detection-index>".
                            links[framei - delta].append([str(i) + str(indexti), str(j) + str(indextj), error])

        flinks = links[framei - delta]
        G = nx.Graph()
        for link in flinks:
            u, v, d = link
            G.add_edge(u, v, weight=d)

        for target in nx.connected_components(G):  # one component per target
            g = G.subgraph(target)
            edges = sorted(g.edges(data=True), key=lambda t: t[2].get('weight', 1))

            # Prefer the lowest-error edge; its two endpoints must belong to
            # different cameras (first character of the node id).
            c1, c2 = edges[0][0], edges[0][1]
            while c1[0] == c2[0]:
                # FIX: random.sample() requires a sequence — passing the set
                # directly raises TypeError on Python 3.11+ — so materialize
                # the component first.
                c1, c2 = random.sample(list(target), 2)

            pos = (cc_matrix[int(c1[0])][int(c2[0])]).get_position_by_uv(data[int(c1[0])][framei][int(c1[1])],
                                                                         data[int(c2[0])][framei][int(c2[1])])

            poses[0].append(round(pos[0], 3))
            poses[1].append(round(pos[1], 3))
            poses[2].append(round(pos[2], 3))
            print(','.join([str(i) for i in pos]))
    return poses


if __name__ == '__main__':
    # Example single-pair configurations (kept for reference):
    # stereo_vision = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
    #                              target_img_dir="./test/", target_img_num=2,
    #                              f=35, dx=23.76 / 1920, dy=13.365 / 1080,
    #                              camera_xy_1=[-15, -10, -10], camera_xy_2=[-15, 0.25, 0],  # world-frame positions
    #                              camera_degree_1=[0, 30, 45], camera_degree_2=[0, 0, 0])
    # stereo_vision = StereoVision(frame_w=1920, frame_h=1080, mouse_event=cv2.EVENT_LBUTTONDBLCLK,
    #                              target_img_dir="./test/", target_img_num=2,
    #                              f=35, dx=23.76 / 1920, dy=13.365 / 1080,
    #                              camera_xy_1=[-210.00, 0, 15.00], camera_xy_2=[-230.00, -20.00, 15.00],  # world-frame positions
    #                              camera_degree_1=[0, 15, 180], camera_degree_2=[0, 15, 90])

    # Run the full four-camera tracking/triangulation pipeline.
    test_data()
    # Example of triangulating a single detection pair by hand:
    # import re
    # import json
    #
    # data = [[], [], [], []]
    # for i in range(1, 5):
    #     with open('./data/data' + str(i), 'r', encoding='utf8') as f:
    #         line = f.readline()
    #         while line:
    #             a = re.search(r"\[.*\n", line)
    #             if a:
    #                 data_line = a.group()[0:-1]
    #                 data[i - 1].append([[(x[0] + x[2]) / 2, (x[1] + x[3]) / 2] for x in json.loads(data_line)])
    #             line = f.readline()
    #
    # # camera x+1, frame y, target z
    # pos1 = data[0][1][0]
    # pos2 = data[1][1][0]
    # print(pos1, pos2)
    #
    # print(stereo_vision.get_position_by_uv(pos1, pos2))
    # print(stereo_vision.get_error_by_uv(pos1, pos2))
    # # If configured to pick target pixels from images, a window pops up; double-click
    # # to compute the target pixel, and the distance plus world coordinates are printed.

    # stereo_vision.compute_distance()
