import cv2
import cv2.aruco as aruco
import numpy as np
import time
import threading
import platform
import logging
from camera import Camera
from calibration import Calibration

font = cv2.FONT_HERSHEY_SIMPLEX # font used for the on-screen text overlays (cv2.putText below)
is_linux = platform.system().lower() == 'linux'  # True => headless mode: all GUI drawing/display calls are skipped

def new_estimatePoseSingleMarkers(corners, marker_size, mtx, distortion):
    '''
    Estimate the rvec and tvec for each of the marker corner sets detected by:
       corners, ids, rejectedImgPoints = detector.detectMarkers(image)

    Drop-in replacement for the removed cv2.aruco.estimatePoseSingleMarkers().

    corners     - array of detected corners, one entry per detected marker
    marker_size - physical side length of the marker (tvec comes out in the same unit)
    mtx         - camera intrinsic matrix
    distortion  - camera distortion coefficients
    RETURN list of rvecs, tvecs, and trash (so that it corresponds to the old
    estimatePoseSingleMarkers()).  NOTE: the returned arrays carry an extra
    leading dimension of size 1 (shape (1, N, 3, 1)); callers index through it
    (e.g. tvec[0][i][2]), so that shape is preserved here.
    '''
    # Marker corner coordinates in the marker's own frame (z = 0 plane),
    # ordered to match the corner order returned by detectMarkers().
    marker_points = np.array([[-marker_size / 2,  marker_size / 2, 0],
                              [marker_size  / 2,  marker_size / 2, 0],
                              [marker_size  / 2, -marker_size / 2, 0],
                              [-marker_size / 2, -marker_size / 2, 0]], dtype=np.float32)
    trash = []
    rvecs = []
    tvecs = []

    for c in corners:
        # BUG FIX: the flags value must be passed by keyword.  The Python
        # signature is solvePnP(objectPoints, imagePoints, cameraMatrix,
        # distCoeffs[, rvec[, tvec[, useExtrinsicGuess[, flags]]]]), so the
        # original positional call bound False -> rvec and
        # SOLVEPNP_IPPE_SQUARE -> tvec, and the solver silently ran with the
        # default SOLVEPNP_ITERATIVE instead of the planar-square solver.
        nada, R, t = cv2.solvePnP(marker_points, c, mtx, distortion,
                                  useExtrinsicGuess=False,
                                  flags=cv2.SOLVEPNP_IPPE_SQUARE)
        rvecs.append(R)
        tvecs.append(t)
        trash.append(nada)

    return np.array([rvecs]), np.array([tvecs]), trash

def loop_and_detect(det):
    """Worker loop: read frames from det.cam, detect ArUco markers, and keep
    det.x / det.y / det.z / det.ts updated with the pose of the first marker.

    Runs until det.is_running goes False (or ESC is pressed in the preview
    window on non-Linux hosts).  Intended to run on a background thread —
    see Detecter.start().
    """
    dictionary = aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_1000)
    parameters = aruco.DetectorParameters()
    detector   = aruco.ArucoDetector(dictionary, parameters)

    #------------------------------------------------------------------------------
    # Compute the new (undistorted) camera intrinsics from the first frame
    #------------------------------------------------------------------------------
    frame = det.cam.read()
    if frame is None:
        logging.error("No image received, exiting...")
        return
    h1, w1 = frame.shape[:2]  # e.g. 720 x 1280
    # alpha balances undistortion vs. field of view (keep black borders or crop):
    # 0 = keep every source pixel, 1 = crop to only valid pixels
    alpha = 0.0
    # BUG FIX: OpenCV image sizes are (width, height); the original passed
    # (h1, w1), which is wrong for any non-square frame.  (The
    # initUndistortRectifyMap call below already used the correct order.)
    new_K, roi = cv2.getOptimalNewCameraMatrix(det.mtx, det.dist, (w1, h1), alpha, (w1, h1))
    map1, map2 = cv2.initUndistortRectifyMap(det.mtx, det.dist, None, new_K, (w1, h1), cv2.CV_16SC2)

    # ===================== sub-pixel corner refinement parameters =====================
    # termination criteria: stop after 30 iterations or at 0.001 px accuracy
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.001)
    # half-width of the search window (full window size = 2*win + 1)
    winSize = (5, 5)

    while det.is_running:
        time.sleep(0.1)  # throttle the loop to ~10 Hz
        frame = det.cam.read()
        if frame is None:
            logging.error("No image received, exiting...")
            time.sleep(1)
            continue

        #------------------------------------------------------------------------------
        # Per-frame processing
        #------------------------------------------------------------------------------
        # every frame is undistorted with the precomputed remap tables;
        # all detection runs on the rectified image
        rect_img = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR)

        # grayscale image for ArUco detection (dictionary chosen above)
        gray = cv2.cvtColor(rect_img, cv2.COLOR_BGR2GRAY)

        # detectMarkers returns the IDs and the 4 corner points of each marker
        (corners, ids, rejected) = detector.detectMarkers(gray)
        if not is_linux:
            frame_markers = aruco.drawDetectedMarkers(rect_img.copy(), corners, ids)

        # ===================== sub-pixel corner refinement =====================
        for c in corners:
            cv2.cornerSubPix(gray, c.reshape(-1, 1, 2), winSize, (-1, -1), criteria)

        if ids is not None:
            # Estimate rvec (rotation) / tvec (translation) for each marker.
            # BUG FIX: the corners come from the rectified (already
            # undistorted) image, so the matching distortion for new_K is
            # zero — passing det.dist here applied the lens model twice.
            # Marker side length: 0.25 m.
            rvec, tvec, _ = new_estimatePoseSingleMarkers(corners, 0.25, new_K,
                                                          np.zeros((5, 1), dtype=np.float32))

            (rvec-tvec).any() # get rid of that nasty numpy value array error

            # draw the marker axes (currently disabled)
            if not is_linux:
                for i in range(rvec.shape[0]):
                    pass
                    #cv2.drawFrameAxes(frame_markers, det.mtx, det.dist, rvec[i, :, :], tvec[i, :, :], 0.05*1.5, 2)
                    #aruco.drawDetectedMarkers(frame_markers, corners, ids)

            ###### show detected marker IDs #####
            if not is_linux:
                # BUG FIX: draw on frame_markers (the image actually shown by
                # imshow below); the original drew on the raw frame, so the
                # overlay text was never visible.
                cv2.putText(frame_markers, "Id: " + str(ids), (0,64), font, 1, (0,255,0),2, cv2.LINE_AA)

            ###### distance estimation #####
            # tvec has shape (1, N, 3, 1): take the first detected marker
            oftX = (tvec[0][0][0])
            oftY = (tvec[0][0][1])
            dist = (tvec[0][0][2])
            logging.info(f'X:{oftX} Y:{oftY} Z:{dist}m')

            # publish the latest measurement and its timestamp
            det.x = oftX[0]
            det.y = oftY[0]
            det.z = dist[0]
            det.ts = time.time()
            ###### angle estimation (not implemented) #####

        # show the annotated frame (GUI hosts only)
        if not is_linux:
            cv2.imshow("frame", frame_markers)
            #cv2.imshow("frame", gray)

        key = cv2.waitKey(1)
        if key == 27: # ESC quits
            det.is_running = False
            print('esc break...')
            det.cam.release()
            cv2.destroyAllWindows()
            break

class Detecter():
    """ Detecter for ARUCO markers.

    Spawns a background thread (loop_and_detect) that reads frames from the
    given camera and continuously publishes the latest marker pose:
      x, y, z - marker offset / distance (metres, camera frame)
      ts      - time.time() stamp of the last successful measurement
    Detection starts immediately on construction; call stop() to shut down.
    """

    def __init__(self, cam:Camera):
        self.cam = cam
        # load the camera calibration parameters
        calib = Calibration()
        calib.load('./标定文件.yaml')
        self.dist = calib.dist # distortion coefficients (k1, k2, p1, p2, k3)
        self.mtx = calib.mtx   # original camera intrinsic matrix
        self.is_running = False
        self.thread = None
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0
        self.ts = 0.0
        self.start()  # begin detecting right away

    def start(self):
        """Start the detection worker thread (no-op if already running)."""
        # BUG FIX: calling start() twice used to spawn a second concurrent
        # worker thread on the same camera.
        if self.thread is not None and self.thread.is_alive():
            return
        self.is_running = True
        self.thread = threading.Thread(target=loop_and_detect, args=(self,))
        self.thread.start()

    def stop(self):
        """Signal the worker loop to exit and wait for it to finish."""
        self.is_running = False
        # BUG FIX: stop() previously returned without joining, so shutdown
        # was not actually complete; the timeout keeps a hung camera read
        # from blocking the caller forever.
        if self.thread is not None:
            self.thread.join(timeout=5)
            self.thread = None

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

    url = 'rtsp://192.168.144.119:554/live'
    #url = 'rtsp://192.168.144.119:554/H264?W=1280&H=720&BR=2000000&FPS=30'
    cam = Camera(url)
    det = Detecter(cam)  # starts the detection thread immediately
    print('Detecter is running')
    try:
        # idle until the worker stops itself (ESC / fatal camera error)
        while det.is_running:
            time.sleep(1)
    except KeyboardInterrupt:
        # ROBUSTNESS FIX: Ctrl+C previously escaped the loop without any
        # cleanup, leaving the worker thread running.
        print('interrupted, shutting down...')
    finally:
        det.stop()
