import time
import sys
import os
import logging

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from protocol.decorators import edp_service, instant
from protocol.prelude import todo
import cv2
import numpy as np
DBG = True

@edp_service
class DataCleaningService:
    """Data-cleaning EDP service.

    Exposes coordinate transforms, keypoint/contour -> pose conversion,
    mask utilities and stimulus segmentation as tasks; see ``tasks`` for
    the advertised "method: InputType->OutputType" signatures.
    """
    name = "data_cleaning"
    desc = "数据清洗服务"
    category = "cleaning"
    tasks = [
        "coords_transform_0: (List<PointF>, Json)->List<PointF>",
        "coords_transform_1: (PointF, Json)->PointF",
        "re_calib_oc2p: (Str, Json)->Json",
        "bridge_1: (List<List<(PointF, Confidence)>>, Json)->List<List<((PointF, Json), Confidence)>>",
        "keypoint_selector: List<(PointF, Confidence)>->(Pose, Confidence)",
        "keypoint_selector_fin: Str->Json",
        "fbpoint_to_pose: List<(PointF, PointF, Confidence)> -> List<(Pose, Confidence)>",
        "mask_to_contour: Mask->?List<PointF>",
        "mask_to_contour_fin: Str->?List<PointF>",
        "contour_to_pose: List<PointF> -> Pose",
        "normalize_thetas: List<Float>->List<Float>",
        "stimulus_segment: List<(Int, Bool)>->List<(Int, Int, Bool)>",
    ]

    # ---- shared private helpers ----------------------------------------
    @staticmethod
    def _apply_homography(point, matrix):
        """Apply a 3x3 homography ``matrix`` to 2-D ``point``; return [wx, wy]."""
        x = point[0]
        y = point[1]
        denominator = x * matrix[2][0] + y * matrix[2][1] + matrix[2][2]
        wx = (x * matrix[0][0] + y * matrix[0][1] + matrix[0][2]) / denominator
        wy = (x * matrix[1][0] + y * matrix[1][1] + matrix[1][2]) / denominator
        return [wx, wy]

    @staticmethod
    def _align_angle(angle, reference):
        """Shift ``angle`` by 90-degree steps until it lies within +/-45
        degrees of ``reference``.

        cv2.fitEllipse angles are only defined modulo 180, so the
        head->tail direction is used to resolve the ambiguity.
        """
        if angle > reference:
            while angle - reference > 45:
                angle -= 90
        else:
            while angle - reference < -45:
                angle += 90
        return angle

    def _ellipse_pose(self, pts, head, tail, confidences):
        """Fit an ellipse to ``pts`` (Nx2 ndarray) and return
        [[cx, cy, angle], mean_confidence], with the ellipse angle snapped
        toward the head->tail direction via _align_angle."""
        ellipse = cv2.fitEllipse(pts.astype(np.float32))
        center = ellipse[0]
        raw_angle = -ellipse[2]
        dy = head[1] - tail[1]
        dx = head[0] - tail[0]
        user_angle = np.degrees(np.arctan2(dy, dx))
        cv_angle = self._align_angle(raw_angle, user_angle)
        if DBG:
            # Fixed: the original printed the adjusted angle in both slots.
            print(f"Original CV angle: {raw_angle}, User angle: {user_angle}, Adjusted: {cv_angle}")
        return [[center[0], center[1], cv_angle], np.mean(confidences)]

    # ---- tasks ----------------------------------------------------------
    def coords_transform_0(self, inputs):
        """Transform a list of 2-D points.

        inputs[0]: list of [x, y] points.
        inputs[1]: dict with either camera parameters ("dist", "rvec",
                   "tvec", "mtx" -> forward projection via
                   cv2.projectPoints) or a 3x3 "matrix" homography.
        Raises ValueError when neither parameter set is present.
        """
        points = inputs[0]
        params = inputs[1]
        if "dist" in params:
            dist = params["dist"]
            rvec = params["rvec"]
            tvec = params["tvec"]
            mtx = params["mtx"]
            new_points = cv2.projectPoints(np.array(points), rvec, tvec, mtx, dist)[0]
            if DBG: print(f"{points[0]} -> {new_points[0]}")
            return new_points
        elif "matrix" in params:
            matrix = params["matrix"]
            new_points = [self._apply_homography(point, matrix) for point in points]
            if DBG: print(f"{points[0]} -> {new_points[0]}")
            return new_points
        else:
            raise ValueError("Invalid input")

    @instant
    def coords_transform_1(self, inputs):
        """Transform a single 2-D point (same parameter schema as
        coords_transform_0).

        With camera parameters this back-projects the pixel onto the
        world plane z=0 (inverse of coords_transform_0's projection).
        """
        point = inputs[0]
        params = inputs[1]
        if "dist" in params:
            dist = params["dist"]
            rvec = params["rvec"]
            tvec = params["tvec"]
            mtx = params["mtx"]
            point_array = np.array([[point]], dtype=np.float32)
            undistorted_point = cv2.undistortPoints(point_array, mtx, dist)
            # Back-projection: build the viewing ray in camera coordinates,
            # rotate it into world coordinates, then intersect with z = 0.
            ray = np.array([undistorted_point[0][0][0], undistorted_point[0][0][1], 1.0])
            R, _ = cv2.Rodrigues(rvec)
            camera_position = -np.dot(R.T, tvec).reshape(3)
            ray_world = np.dot(R.T, ray)
            t = -camera_position[2] / ray_world[2]
            new_point = [camera_position[0] + t * ray_world[0], camera_position[1] + t * ray_world[1]]
            if DBG: print(f"{point} -> {new_point}")
            return new_point
        elif "matrix" in params:
            new_point = self._apply_homography(point, params["matrix"])
            if DBG: print(f"{point} -> {tuple(new_point)}")
            return new_point
        else:
            raise ValueError("Invalid input")

    def re_calib_oc2p(self, inputs):
        """Undistort an image (or the first frame of a video) at path
        ``inputs[0]`` using camera parameters in ``inputs[1]``.

        Unfinished: ends in todo() after the undistortion step.
        Raises ValueError if the path is neither a readable image nor a
        readable video, NotImplementedError without "dist" parameters.
        """
        filepath = inputs[0]
        params = inputs[1]
        if "dist" in params:
            dist = params["dist"]
            rvec = params["rvec"]  # NOTE(review): unused until todo() is implemented
            tvec = params["tvec"]
            mtx = params["mtx"]
            # cv2.imread does not raise on a bad path -- it returns None --
            # so test the result instead of wrapping it in try/except.
            img = cv2.imread(filepath)
            if img is None:
                cap = cv2.VideoCapture(filepath)
                ret, img = cap.read()
                cap.release()  # don't leak the capture handle
                if not ret:
                    raise ValueError("Invalid FilePath")
            dst = cv2.undistort(img, mtx, dist)
            todo()
        else:
            raise NotImplementedError

    @instant
    def bridge_1(self, inputs):
        """Attach the JSON blob ``inputs[1]`` to every keypoint: each
        (point, conf) pair becomes ([point, json], conf).

        Mutates and returns inputs[0]; the inner pairs must be mutable
        (lists), despite the tuple notation in the task signature.
        """
        json_data = inputs[1]
        for frame in inputs[0]:
            for pair in frame:
                pair[0] = [pair[0], json_data]
        return inputs[0]

    @instant
    def keypoint_selector(self, inputs):
        """Collapse a list of (point, confidence) keypoints into a single
        ([cx, cy, theta], mean_confidence) pose.

        * 2 keypoints: (head, tail) -- midpoint plus heading angle.
        * 4 keypoints: (head, left, right, tail) -- ellipse fit over the
          points plus four interpolated edge midpoints.
        * >=5 keypoints: ellipse fit over all points; first/last points
          act as head/tail for angle disambiguation.
        Raises ValueError for 1 or 3 keypoints.
        """
        points = [x[0] for x in inputs]
        confidences = [x[1] for x in inputs]
        if len(points) == 2:
            head, tail = points
            center = (np.array(head) + np.array(tail)) / 2
            dy = head[1] - tail[1]
            dx = head[0] - tail[0]
            theta = np.degrees(np.arctan2(dy, dx))
            return [[center[0], center[1], theta], np.mean(confidences)]
        elif len(points) == 4:
            points = np.array(points)
            head, left, right, tail = points
            # Densify with edge midpoints so fitEllipse has 8 samples.
            additional_points = np.array([
                (head + left) / 2,
                (head + right) / 2,
                (tail + left) / 2,
                (tail + right) / 2
            ])
            all_points = np.vstack((points, additional_points))
            return self._ellipse_pose(all_points, head, tail, confidences)
        elif len(points) < 5:
            raise ValueError("Invalid Input")
        else:
            points = np.array(points)
            return self._ellipse_pose(points, points[0], points[-1], confidences)

    def keypoint_selector_2(self, inputs):
        """Variant of keypoint_selector taking (points, confidences) where
        each point is wrapped in an extra array (DLC pickle layout).

        Returns plain Python floats via .item(). Only the 2-keypoint
        (head, tail) case is implemented.
        """
        points = inputs[0]
        confidences = inputs[1]
        if len(points) != 2:
            raise NotImplementedError
        head = points[0][0]
        tail = points[1][0]
        center = (np.array(head) + np.array(tail)) / 2
        dy = head[1] - tail[1]
        dx = head[0] - tail[0]
        theta = np.degrees(np.arctan2(dy, dx))
        return [[center[0].item(), center[1].item(), theta.item()], np.mean(confidences).item()]

    @instant
    def keypoint_selector_fin(self, inputs):
        """Load a DLC full-pickle file (path in ``inputs``) and replace
        each frame entry with a (pose, confidence) pair via
        keypoint_selector_2; the "metadata" entry is dropped."""
        filepath = inputs
        import pickle
        with open(filepath, 'rb') as f:
            content = pickle.load(f)
        for key in content:
            if key == "metadata":
                continue
            coords = content[key]['coordinates'][0]
            confs = content[key]['confidence']
            content[key] = self.keypoint_selector_2((coords, confs))
        # pop() instead of del: don't crash if the file lacks "metadata".
        content.pop("metadata", None)
        return content

    @instant
    def fbpoint_to_pose(self, inputs):
        """Convert (front_point, back_point, confidence) triples into
        ([cx, cy, theta], confidence) poses (theta points back->front)."""
        res = []
        for (xf, yf), (xb, yb), conf in inputs:
            center = [(xf + xb) / 2, (yf + yb) / 2]
            theta = np.degrees(np.arctan2(yf - yb, xf - xb))
            res.append([[*center, theta], conf])
        return res

    @instant
    def mask_to_contour(self, inputs):
        """Extract the largest external contour from a binary mask.

        Accepts a 2-D mask or a multi-channel image (converted to gray).
        Returns a list of [x, y] points, or None when no contour exists.
        """
        inputs = np.array(inputs, dtype=np.uint8)
        if len(inputs.shape) > 2 and inputs.shape[2] > 1:
            inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2GRAY)
        _, binary_mask = cv2.threshold(inputs, 127, 255, cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            if DBG:
                print("No contours found in mask")
            return None
        max_contour = max(contours, key=cv2.contourArea)
        contour_points = max_contour.reshape(-1, 2).tolist()
        return contour_points

    @instant
    def mask_to_contour_fin(self, inputs):
        """File-path variant of mask_to_contour; returns None for an
        unreadable path (the task signature permits an optional result)."""
        filepath = inputs
        data = cv2.imread(filepath)
        if data is None:
            # cv2.imread returns None on a bad path -- propagate None
            # instead of crashing inside mask_to_contour.
            if DBG:
                print(f"Unable to read mask file: {filepath}")
            return None
        return self.mask_to_contour(data)

    def test_mask_to_contour(self):
        """Manual smoke test for mask_to_contour against a local file;
        prints results, never raises."""
        try:
            data = cv2.imread(r"M:\DLC-input\mask0.png")
            if data is None:
                print("Error: Unable to read image file")
                return
            print(f"Image shape: {data.shape}, dtype: {data.dtype}")
            res = self.mask_to_contour(data)
            print(f"Result: {len(res)} contour points")
            if len(res) > 0:
                print(f"First point: {res[0]}")
        except Exception as e:
            print(f"Error in test_mask_to_contour: {str(e)}")
            import traceback
            traceback.print_exc()

    @instant
    def contour_to_pose(self, inputs):
        """Fit an ellipse to a contour and return [cx, cy, theta] with
        theta measured along the major axis."""
        contour = np.array(inputs, dtype=np.float32)
        ellipse = cv2.fitEllipse(contour)
        center = ellipse[0]
        theta = -ellipse[2]
        axis = ellipse[1]
        # fitEllipse reports the angle of the first axis; rotate 90 degrees
        # when the second axis is the longer one so theta tracks the major axis.
        if axis[1] > axis[0]:
            theta += 90
        return [*center, theta]

    @instant
    def normalize_thetas(self, inputs):
        """Unwrap a sequence of angles (degrees) so consecutive values
        never jump by more than 180; the first value is folded into
        [-180, 180]. Mutates and returns ``inputs``."""
        if not inputs:
            return inputs  # nothing to unwrap
        prev = inputs[0]
        while prev > 180:
            prev -= 360
        while prev < -180:
            prev += 360
        inputs[0] = prev
        for i in range(1, len(inputs)):
            while inputs[i] - prev > 180:
                inputs[i] -= 360
            while inputs[i] - prev < -180:
                inputs[i] += 360
            prev = inputs[i]
        return inputs

    @instant
    def stimulus_segment(self, inputs):
        """Group (frame, is_right) events into [start, end, is_right]
        segments.

        Left and right event streams are segmented independently; a gap of
        min_interval frames or more between same-side events closes the
        current segment and starts a new one. Trailing open segments are
        flushed at the end (right first, then left).
        """
        min_interval = 30  # frames; presumably ~0.5 s -- TODO confirm frame rate
        res = []
        lprev = -1
        rprev = -1
        lstart = -1
        rstart = -1
        for frame, right in inputs:
            if right:
                if rprev == -1:
                    rstart = frame
                    rprev = frame
                elif frame - rprev >= min_interval:
                    res.append([rstart, rprev, True])
                    rstart = frame
                    rprev = frame
                else:
                    rprev = frame
            else:
                if lprev == -1:
                    lstart = frame
                    lprev = frame
                elif frame - lprev >= min_interval:
                    res.append([lstart, lprev, False])
                    lstart = frame
                    lprev = frame
                else:
                    lprev = frame
        if rprev > -1:
            res.append([rstart, rprev, True])
        if lprev > -1:
            res.append([lstart, lprev, False])
        return res
if __name__ == "__main__":
    # Serve the cleaning tasks over HTTP on port 5030; `app` is attached to
    # the class by the @edp_service decorator (defined in protocol.decorators).
    DataCleaningService.app.run(port=5030)
