import json
import logging
from collections import namedtuple

import numpy as np
import torch
from torch.utils.data import Dataset

logger = logging.getLogger(__name__)


# One kinematics sample: joint angles, end-effector position, end-effector rpy.
KinematicBatch = namedtuple("KinematicBatch", ["joint", "xyz", "rpy"])
# One planning sample: an action chunk plus the vector it is conditioned on.
Transition = namedtuple("Transition", ["action", "condition"])


class KinematicData(Dataset):
    """
    Joint positions paired with the end-effector pose.

    Fields of each sample:
        joint: (6,) in radian
        xyz: (3,) in mm (Gaussian noise of scale `noise_level` is added per access)
        rpy: (3,) in radian
    """

    def __init__(self, data_file="data/json_data/points_can0.json", noise_level: float = 0.1):
        with open(data_file, "r") as f:
            raw = json.load(f)
        self.joint = torch.tensor(raw["joint"])
        self.xyz = torch.tensor(raw["xyz"])
        self.rpy = torch.tensor(raw["rpy"])
        # scale of the Gaussian noise injected into xyz on every __getitem__
        self.noise_level = noise_level

    def __getitem__(self, idx):
        clean_xyz = self.xyz[idx]
        noisy_xyz = clean_xyz + self.noise_level * torch.randn_like(clean_xyz)
        return KinematicBatch(joint=self.joint[idx], xyz=noisy_xyz, rpy=self.rpy[idx])

    def __len__(self):
        return self.joint.shape[0]


class TrajectoryPlanningData(Dataset):
    """
    Sequential end-effector data for planning.

    data keys:
        xeuler: flat list of 6D poses (xyz + euler angles), all trajectories
            concatenated back to back
        lengths: per-trajectory step counts; sum(lengths) == len(xeuler)

    horizon_steps: the number of action steps to predict;
    the action chunk is of dim=horizon_steps*6.
    The minimum length of a trajectory is 2, including the current state and
    the next state.

    New version:
        Diffusion models serve as the planner of end-effector poses only!
        No joint data is needed!
    """

    def __init__(
        self,
        data_file="data/json_data/sim_data_1M_0.1.json",
        horizon_steps: int = 5,
        padding: bool = False,
    ):
        """
        Args:
            data_file: JSON file with "xeuler" (flat list of 6D poses) and
                "lengths" (per-trajectory step counts).
            horizon_steps: number of future poses returned as the action chunk.
            padding: keep samples whose remaining steps are shorter than the
                horizon (their chunks are padded with the goal pose).
        """
        logger.info("Loading data from '%s' ...", data_file)
        with open(data_file, "r") as f:
            data = json.load(f)
        self.xeulers = torch.tensor(data["xeuler"])
        self.lengths = data["lengths"]

        # Overwrite the orientation with a fixed rpy so only positions vary.
        # NOTE(review): [0, 2, 0] is presumably radians — confirm it matches
        # the robot's canonical end-effector orientation.
        self.xeulers[:, 3:] = torch.tensor([0, 2, 0])

        self.horizon_steps = horizon_steps
        self.padding = padding
        self.indices = self.make_indices()

        logger.info("Data loaded: #traj=%d, #data=%d", len(self.lengths), len(self))

    def make_indices(self):
        """
        Make indices for the data, each index is a tuple of (data_id, steps_to_go).

        `data_id` is the flat index of the state in self.xeulers and
        `steps_to_go` is the number of remaining steps to the trajectory goal.
        """
        # per-state countdown to the goal, and per-state offset in its trajectory
        steps_to_go = [s for length in self.lengths for s in range(length - 1, -1, -1)]
        traj_step = [t for length in self.lengths for t in range(length)]

        indices = []
        for flat_idx, (remaining, offset) in enumerate(zip(steps_to_go, traj_step)):
            if remaining > 0:  # the goal state requires no action
                if self.padding or offset == 0 or remaining > self.horizon_steps - 1:
                    # either padding, or the start of a trajectory,
                    # or a full horizon still fits before the goal
                    indices.append((flat_idx, remaining))

        return indices

    def __getitem__(self, idx):
        """
        Return a Transition for sample `idx`.

        start: the start index of the trajectory slice in the flat dataset;
        start + steps_to_go is the index of the goal (last) state.
        The action is the next `horizon_steps` poses (padded with the goal
        pose once the trajectory ends); the condition is the current pose
        concatenated with the goal pose.
        """
        start, steps_to_go = self.indices[idx]
        xeulers = self.xeulers[start : start + steps_to_go + 1]

        # condition on current pose and goal pose,
        # predict the middle poses and the goal pose
        action = torch.stack([xeulers[min(1 + i, steps_to_go)] for i in range(self.horizon_steps)])
        condition = torch.concat([xeulers[0], xeulers[-1]], dim=-1)
        return Transition(action=action, condition=condition)

    def __len__(self):
        # one sample per non-goal state retained by make_indices()
        return len(self.indices)


def test_data_loader():
    """Visual smoke test: roll out planned pose chunks in the simulator."""
    import mediapy as media

    from sim.piper_simulator import MujocoPiperSim

    dataset = TrajectoryPlanningData(horizon_steps=20)
    sim = MujocoPiperSim()
    frames = []
    for sample_idx in range(100):
        transition = dataset[sample_idx]
        for pose in transition.action:
            sim.move_to(pose[:3], pose[3:])
            frames.append(sim.render_frame())
    media.show_video(frames, loop=False)


# class ArmJointData(Dataset):
#     """
#     The raw dataset: only contains the joint position and the arm position
#     The end position would be re-calculated by the estimated gripper length
#     """

#     def __init__(self, data_file):

#         with open(data_file, "r") as f:
#             data = json.load(f)
#             self.data = data

#             self.arm = torch.tensor(data["arm"])
#             self.joints = torch.tensor(data["joint"])

#         logger.info(f"Data size:{self.__len__()}")

#     def __getitem__(self, idx):
#         return Batch(x=self.arm[idx], y=self.joints[idx])

#     def __len__(self):
#         return len(self.joints)


# class EndJointData(ArmJointData):
#     def __init__(self, data_file, arm_end_length: float = 130):
#         super().__init__(data_file)
#         self.arm_end_length = arm_end_length
#         self.ends = torch.tensor(np.array([arm_end_translation(_, arm_end_length) for _ in self.data["arm"]]))

#     def __getitem__(self, idx):
#         return Batch(self.ends[idx][:3], self.joints[idx])


# class EndArmData(EndJointData):
#     def __init__(self, data_file, arm_end_length: float = 130):
#         super().__init__(data_file, arm_end_length)

#     def __getitem__(self, idx):
#         return Batch(self.ends[idx][:3], self.arm[idx])


# class NormalizedEndJointData(EndJointData):
#     """
#     Normalize the JointData to be [-1, 1]
#     """

#     def __init__(self, data_file, arm_end_length: float = 130):
#         super().__init__(data_file, arm_end_length)
#         self.joints = normalize_joint(self.joints)
#         self.ends = normalize_end(self.ends)


# class NormalizedEndJointSequenceData(NormalizedEndJointData):
#     """
#     Normalize the JointData to be [-1, 1]
#     """

#     def __init__(self, data_file, arm_end_length: float = 130, horizon_steps: int = 5, stride: int = 10):

#         self.horizon_steps = horizon_steps
#         self.stride = stride
#         self.strided_horizion = horizon_steps * stride
#         super().__init__(data_file, arm_end_length)

#         self.current_xyz = self.ends[: -self.strided_horizion, :3]
#         self.goal_xyz = self.ends[self.strided_horizion :, :3]
#         # condtion on (xt, goal-xt), which is 6d
#         self.conditions = torch.concat([self.current_xyz, self.goal_xyz - self.current_xyz], dim=-1)

#     def __getitem__(self, idx):
#         return Batch(self.conditions[idx], self.joints[idx + self.stride : idx + self.strided_horizion + 1: self.stride ])

#     def __len__(self):
#         return len(self.joints) - self.strided_horizion


# class NormalizedFKData(NormalizedEndJointData):
#     """
#     Given 6D joints -> predict 3D end positions
#     """

#     def __init__(self, data_file, arm_end_length: float = 130):
#         super().__init__(data_file, arm_end_length)

#     def __getitem__(self, idx):
#         return Batch(self.joints[idx], self.ends[idx][:3])


class EndJointTrajectoryData(Dataset):
    """
    The raw dataset: only contains the joint position and the arm position.
    The end position would be re-calculated by the estimated gripper length.

    Yields Transition(action, condition) where the action is a chunk of
    future joint targets and the condition packs the current joint/end state
    together with the goal end state.

    NOTE(review): make_indices() zips a per-step steps_to_go list (length
    sum(traj_lengths)) against an arange of length __len__(); zip truncates
    to the shorter one. Both only line up if the loaded arrays use a flat
    per-step layout — confirm against the data file.
    """

    def __init__(self, data_file, horizon_steps: int = 5, stride: int = 1):
        # horizon_steps: number of action steps per returned chunk
        # stride: spacing (in trajectory steps) between consecutive actions
        self.horizon_steps = horizon_steps
        self.stride = stride

        with open(data_file, "r") as f:
            data = json.load(f)
            self.data = data

        # data augmentations: mirror movements (each sequence reversed)
        # NOTE(review): if data["arms"] etc. are flat lists of per-step
        # vectors, l[::-1] reverses each vector, not the trajectory —
        # confirm the JSON stores one list per trajectory.
        mirror_arms = [l[::-1] for l in data["arms"]]
        mirror_joints = [l[::-1] for l in data["joints"]]
        mirror_ends = [l[::-1] for l in data["ends"]]

        self.arms = torch.tensor(data["arms"] + mirror_arms)  # currently not used
        self.joints = torch.tensor(data["joints"] + mirror_joints)
        self.ends = torch.tensor(data["ends"] + mirror_ends)
        # lengths doubled to cover the mirrored copies appended above
        self.traj_lengths = data["lengths"] + data["lengths"]
        self.indices = self.make_indices()

        logger.info(f"Data size:{self.__len__()}")

    def make_indices(self):
        """
        Make indices for the data, each index is a tuple of (data_id, traj_step)
        """
        # countdown of remaining steps for every state of every trajectory
        steps_to_go = [_ for l in self.traj_lengths for _ in range(l - 1, -1, -1)]
        start = np.arange(self.__len__()).tolist()
        # zip truncates to the shorter list — see class-level NOTE(review)
        indices = [(s, t) for s, t in zip(start, steps_to_go)]
        return indices

    def __getitem__(self, idx):
        # start: flat index of the current state; steps_to_go: steps to goal
        start, steps_to_go = self.indices[idx]
        goal_end = self.ends[start + steps_to_go]
        current_end = self.ends[start, :3]  # only x,y,z
        # NOTE(review): only the first 3 joint coordinates — confirm intentional
        current_joint = self.joints[start, :3]

        # build the action chunk; once the trajectory ends before the horizon,
        # the chunk is padded by repeating the goal-step joints
        action_chunks = []
        for i in range(1, self.horizon_steps + 1):
            """
            it gives padding if eary finished
            """
            action_chunks.append(self.joints[start + min(i * self.stride, steps_to_go)])
        return Transition(
            action=torch.stack(action_chunks, dim=0),  # (B, H, dim_a)
            # NOTE(review): labeled (B, 6) but is 3 + 3 + len(goal_end) — confirm
            condition=torch.concat([current_joint, current_end, goal_end], dim=-1),  # (B, 6)
        )

    def __len__(self):
        # NOTE(review): assumes self.joints is flat (sum of lengths, dim);
        # the last `stride` states are dropped so each sample has a future step
        return len(self.joints) - self.stride  # at least one step to go

    # for i in data[20].action:
    # self.move_to_arm_joint(denormalize_joint(i.numpy()))
    # time.sleep(1)


# class FourierEndJointData(EndJointData):
#     """
#     Predict the [cos(j), sin(j)]
#     Then, solve j = sign(sin(j)) * arccos(j)
#     """

#     def __init__(self, data_file, arm_end_length: float = 129.535):
#         super().__init__(data_file, arm_end_length)
#         self.joints = torch.stack([self.transform(_) for _ in self.joints])
#         # (B, 12)

#     @staticmethod
#     def transform(joints):
#         """
#         return: [c1, s1, c2, s2, ...]
#         """
#         cos_j = torch.cos(joints / 180 * torch.pi)
#         sin_j = torch.sin(joints / 180 * torch.pi)
#         return torch.stack([cos_j, sin_j]).T.flatten()
