import math
import time

import cv2
import pyrealsense2 as rs
import os
import torch
import sys
import numpy as np
from ultralytics import YOLO
from utils.grasp_utils import get_angle_width, viewpoint_params_to_matrix, grasp_convert_xyzrpy, CameraInfo

# --------------------- Robot library -------------------------
from robot.UR5.ur_control import RobotControl
from pid_contronl import PIDController


class GraspExecute(object):
    """Closed-loop visual grasping on a conveyor.

    Streams RGB-D frames from a RealSense camera, segments the target
    object with a YOLO model, converts the segmentation mask into a
    grasp pose, and servoes a UR5 arm toward it with a PID controller
    until the grasp threshold is met.
    """

    def __init__(self):
        # Start the RealSense pipeline with matching color/depth streams.
        self.pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        self.profile = self.pipeline.start(config)
        self.target_platform = 'GPU'
        # Fixed working distance between camera and grasp plane
        # (units presumably meters — TODO confirm against CameraInfo).
        self.z_distance = 1
        # Max X error allowed before triggering the grasp.
        self.GRASP_THRESHOLD = 0.01
        # Control-loop period fed to the PID controller.
        self.dt = 0.2
        self.robot = RobotControl()
        self.pid = PIDController(kp=0.1, ki=0.01, kd=0.05)
        # Cache of loaded YOLO models keyed by weights path, so the
        # expensive model load happens once instead of every frame.
        self._models = {}

    def grasp_pose(self, object_mask, camera):
        """Estimate a grasp from a binary object mask.

        Args:
            object_mask: single-channel binary image of the target object.
            camera: CameraInfo with fx/fy/cx/cy intrinsics and z_distance.

        Returns:
            (width, rotation_matrix, translation) — gripper opening,
            3x3 rotation and XYZ translation of the grasp.

        Raises:
            ValueError: if the mask contains no contours.
        """
        contours, _ = cv2.findContours(object_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            raise ValueError("object mask contains no contours")
        # Assume the largest contour is the target object.
        largest = max(contours, key=cv2.contourArea)
        rect = cv2.minAreaRect(largest)
        cx, cy = rect[0]
        w = min(rect[1])  # short side of the min-area rectangle
        # np.int0 was removed in NumPy 2.x; np.intp is the same type.
        box = np.intp(cv2.boxPoints(rect))

        # Back-project the rectangle centre to camera coordinates using
        # the pinhole model at the fixed working distance.
        z = camera.z_distance
        x = z / camera.fx * (cx - camera.cx)  # m
        y = z / camera.fy * (cy - camera.cy)  # m
        t = np.array([x, y, z], dtype=np.float32)
        # Approach direction passed to viewpoint_params_to_matrix.
        # NOTE(review): [0, 0, 0.1] * pi is kept from the original tuning
        # — confirm its intended semantics with the grasp_utils author.
        towards = np.array([0, 0, 0.1]) * np.pi
        angle, width = get_angle_width(box, cx, cy, w, z, camera)
        angle = (angle / 180.0) * math.pi  # degrees -> radians, [0, pi]
        rotation_matrix = viewpoint_params_to_matrix(towards, angle)
        return width, rotation_matrix, t

    def torch_infer(self, model_path, image, show_image=False):
        """Run YOLO segmentation and return the masked color image.

        Args:
            model_path: path to the YOLO weights file (cached after first load).
            image: BGR color frame.
            show_image: when True, display the annotated prediction.

        Returns:
            The 640x480 color image with everything outside the first
            instance mask zeroed out.

        Raises:
            ValueError: when the model detects no mask.
        """
        model = self._models.get(model_path)
        if model is None:
            model = self._models.setdefault(model_path, YOLO(model_path))
        color_seg = cv2.resize(image, (640, 480))
        result = model.predict(color_seg, conf=0.5)[0]
        if result.masks is None:
            # The original printed and then crashed indexing masks[0];
            # raise explicitly so the caller's retry logic takes over.
            raise ValueError("Mask does not exist")
        m = torch.squeeze(result.masks[0].data)
        # Replicate the mask across the three color channels.
        composite = torch.stack((m, m, m), 2)
        seg_mask = color_seg * composite.cpu().numpy().astype(np.uint8)
        if show_image:
            cv2.namedWindow("result", cv2.WINDOW_FREERATIO)
            cv2.resizeWindow("result", (640, 480))
            cv2.imshow("result", result.plot())
            if cv2.waitKey(0) & 0xFF == ord("q"):
                cv2.destroyAllWindows()
        return seg_mask

    def segmentation_grasp(self):
        """Main control loop: segment, estimate pose, servo, grasp.

        Runs until 'q'/ESC is pressed; always stops the camera pipeline
        on exit (the original called pipeline.stop() twice on the quit
        path, which raises on the second call).
        """
        try:
            while True:
                frames = self.pipeline.wait_for_frames()
                color_frame = frames.get_color_frame()
                if not color_frame:
                    continue
                color = np.asanyarray(color_frame.get_data())
                intrinsic = self.profile.get_stream(rs.stream.depth).as_video_stream_profile().get_intrinsics()
                height, width = color.shape[:2]
                camera = CameraInfo(1280.0, 720.0, float(intrinsic.fx), float(intrinsic.fy),
                                    float(intrinsic.ppx), float(intrinsic.ppy), self.z_distance)
                seg_mask = None
                try:
                    # YOLO v8 segmentation
                    if self.target_platform == 'GPU':
                        model_path = '/home/jp/jiangpan/conveyor-realsense/best.pt'
                        seg_mask = self.torch_infer(model_path, color, show_image=False)
                except Exception:
                    # No detection or model failure: back off and retry.
                    print("None target have been found, please try it again..")
                    self.robot.return_origin()
                    continue
                if seg_mask is None:
                    # Non-GPU platforms are not implemented; previously this
                    # fell through and crashed in cvtColor(None).
                    continue

                # Binarize the segmented image into an object mask at full
                # frame resolution.
                gray_img = cv2.cvtColor(seg_mask, cv2.COLOR_BGR2GRAY)
                _, object_mask = cv2.threshold(gray_img, 0.5, 1, cv2.THRESH_BINARY)
                object_mask = cv2.resize(object_mask, (width, height))

                grasp_width, rotation_matrix, translation = self.grasp_pose(object_mask, camera)
                target_position = grasp_convert_xyzrpy(translation, rotation_matrix)
                print("--------------------the target translation----------------", target_position)

                # Servo the TCP toward the target in X/Y.
                # NOTE(review): one PID instance is shared by both axes, so
                # the integral/derivative state mixes X and Y errors —
                # confirm whether separate controllers were intended.
                robot_position = self.robot.get_TCPlocation()
                adjustment_x = self.pid.update(target_position[0], self.dt)
                adjustment_y = self.pid.update(target_position[1], self.dt)
                # BUG FIX: the original indexed into an empty list
                # (new_position[0] = ... on []), which raises IndexError;
                # build the waypoint directly instead.
                new_position = [
                    robot_position[0] + adjustment_x,
                    robot_position[1] + adjustment_y,
                    self.z_distance,
                    2.887,   # fixed tool orientation (rx, ry, rz),
                    -1.196,  # taken from the original tuning
                    -0.01,
                ]
                self.robot.move_arm_to(new_position)
                # Grasp once the X error is within tolerance.
                if abs(new_position[0] - target_position[0]) <= self.GRASP_THRESHOLD:
                    print("Grasp condition met. Executing grasp.")
                    self.robot.grasp(new_position)
                    continue

                # Show the live color stream.
                cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
                cv2.imshow('RealSense', color)
                key = cv2.waitKey(1)
                print(key)
                if key & 0xFF == ord('q') or key == 27:
                    cv2.destroyAllWindows()
                    break
        finally:
            self.pipeline.stop()


if __name__ == "__main__":
    # Script entry point: open the camera/robot and run the grasp loop.
    executor = GraspExecute()
    executor.segmentation_grasp()


class KalmanFilter:
    """One-dimensional scalar Kalman filter for smoothing noisy measurements.

    The state is modeled as a constant value observed through additive
    noise; each call to update() blends a new measurement into the
    running estimate.
    """

    def __init__(self, process_variance, measurement_variance, estimated_measurement_variance):
        # Noise model parameters.
        self.process_variance = process_variance
        self.measurement_variance = measurement_variance
        # NOTE: stored for interface compatibility; update() never reads it.
        self.estimated_measurement_variance = estimated_measurement_variance
        # Current state estimate and its error variance.
        self.posteri_estimate = 0.0
        self.posteri_error_estimate = 1.0

    def update(self, measurement):
        """Fold one measurement into the filter and return the new estimate."""
        # Predict: the state is assumed constant, so only uncertainty grows.
        predicted = self.posteri_estimate
        predicted_error = self.posteri_error_estimate + self.process_variance

        # Correct: the Kalman gain weights the innovation by how much we
        # trust the prediction relative to the measurement.
        gain = predicted_error / (predicted_error + self.measurement_variance)
        self.posteri_estimate = predicted + gain * (measurement - predicted)
        self.posteri_error_estimate = (1 - gain) * predicted_error

        return self.posteri_estimate