#!/usr/bin/env python

import json
import os
import shutil
from dataclasses import asdict
from itertools import accumulate
from pprint import pformat

import matplotlib.pyplot as plt
import pandas as pd

from lerobot.configs import parser
from lerobot.configs.critical_frame_detector import CriticalFrameDetectorConfig


def create_or_replace_directory(directory):
    """Ensure *directory* exists as a fresh, empty directory.

    If it already exists, ask the user (via stdin) whether to delete it
    first; return False when the user declines or when deletion/creation
    fails, True once the directory has been (re)created.
    """
    if os.path.exists(directory):
        # Never wipe an existing directory without explicit confirmation.
        answer = input(f"目录 '{directory}' 已存在，是否删除？(y/n): ")
        if answer.lower() != 'y':
            print("操作已取消。程序终止")
            return False
        try:
            shutil.rmtree(directory)
        except Exception as err:
            print(f"删除目录时出错: {err}")
            return False
        print(f"目录 '{directory}' 已删除。")

    try:
        os.makedirs(directory, exist_ok=True)
    except Exception as err:
        print(f"创建目录时出错: {err}")
        return False
    print(f"目录 '{directory}' 已创建/已存在。")
    return True

class CriticalFrameDetector:
    """Detect "critical frames" in a LeRobot-style dataset.

    A frame is considered critical when any monitored gripper motor is
    nearly stationary while drawing a large current (i.e. it is gripping
    something).  Such frames — widened by a configurable window — get a
    higher training weight, which is written back into the parquet files.
    The weighted intervals are also cut out of the episode videos with
    ffmpeg, and a weight-distribution plot is saved next to the dataset.
    """

    def __init__(self, cfg: "CriticalFrameDetectorConfig"):
        """Store the configuration, load the dataset meta info, then run.

        Config fields used:
        dataset_path: root directory of the dataset
        gripper_config: monitored motor indices [idx1, idx2, ...]
        clip_view: camera views to clip ["xxx", ...]; "all" selects every view
        min_current_threshold: current above this counts as "large current"
        max_velocity_threshold: speed below this counts as "zero velocity"
        key_frame_training_weight: weight assigned to critical frames
        n_forward_expansion: frames before a key frame that also get the weight
        n_backward_expansion: frames after a key frame that also get the weight
        """
        self.gripper_indices = cfg.gripper_config
        self.clip_view = cfg.clip_view
        self.gripper_names = [f'gripper_{i}' for i in range(len(cfg.gripper_config))]
        self.current_threshold = cfg.min_current_threshold
        self.velocity_threshold = cfg.max_velocity_threshold
        self.key_frame_training_weight = cfg.key_frame_training_weight
        self.n_forward_expansion = cfg.n_forward_expansion
        self.n_backward_expansion = cfg.n_backward_expansion
        self.dataset_path = cfg.dataset_path
        with open(os.path.join(self.dataset_path, "meta", "info.json"), 'r', encoding='utf-8') as f:
            self.meta_info = json.load(f)
        self.run_main()

    def find_start_time(self, df):
        """Return the cumulative start time of each episode.

        Episode duration = last timestamp + one frame period (1/fps,
        defaulting to 30 fps when meta info has no "fps" entry), so the
        returned offsets map episode-local timestamps onto the timeline
        of the concatenated video.
        """
        group_durations = df.groupby('episode_index')['timestamp'].max() + 1 / self.meta_info.get("fps", 30)
        start_time_lst = list(accumulate(group_durations.tolist()))
        return [0] + start_time_lst[:-1]

    def extract_data(self, series, data_type='state'):
        """Extract per-motor values from a series of array-like rows.

        Returns a list (one entry per row) of the values at
        ``self.gripper_indices``, or the full row when indices are None.
        Out-of-range indices and non-indexable rows yield zeros so the
        downstream shapes stay consistent.
        """
        result = []

        for x in series:
            if hasattr(x, '__getitem__'):
                if self.gripper_indices is None:
                    # No indices configured: keep every motor value.
                    # (Bug fix: removed a dead re-initialisation of
                    # self.gripper_names — __init__ always sets it.)
                    result.append(list(x))
                else:
                    gripper_data = []
                    for idx in self.gripper_indices:
                        # Accept positive and negative in-range indices.
                        if -len(x) <= idx < len(x):
                            gripper_data.append(x[idx])
                        else:
                            gripper_data.append(0)
                    result.append(gripper_data)
            else:
                # Non-indexable row: emit a zero per expected motor.
                num_gripper = len(self.gripper_indices) if self.gripper_indices is not None else 1
                result.append([0] * num_gripper)

        return result

    def calculate_training_weight(self, row):
        """Return the training weight for one frame.

        If any monitored motor is almost stationary while drawing a
        current above the threshold, return the key-frame weight;
        otherwise 1.
        """
        for velocity, current in zip(row['velocity'], row['gripper_current']):
            if abs(velocity) < self.velocity_threshold and abs(current) > self.current_threshold:
                return self.key_frame_training_weight
        return 1

    def calculate_gripper_velocity(self, state_series, timestamp_series):
        """Finite-difference motor velocities from consecutive states.

        Returns a list (one entry per row) of per-motor velocities; the
        first row and any row with inconsistent shapes yield zeros.
        """
        velocities = []

        for i in range(len(state_series)):
            if i == 0:
                # No previous sample: velocity is defined as 0.
                num_gripper = len(state_series.iloc[i]) if hasattr(state_series.iloc[i], '__len__') else 1
                velocities.append([0.0] * num_gripper)
                continue

            dt = timestamp_series.iloc[i] - timestamp_series.iloc[i - 1]
            current_state = state_series.iloc[i]
            prev_state = state_series.iloc[i - 1]

            if (hasattr(current_state, '__len__') and hasattr(prev_state, '__len__')
                    and len(current_state) == len(prev_state)):
                # Guard against dt <= 0 to avoid division errors.
                gripper_velocities = [
                    (current_state[j] - prev_state[j]) / dt if dt > 0 else 0.0
                    for j in range(len(current_state))
                ]
                velocities.append(gripper_velocities)
            else:
                # Inconsistent sample shapes: fall back to zeros.
                num_gripper = len(current_state) if hasattr(current_state, '__len__') else 1
                velocities.append([0.0] * num_gripper)

        return velocities

    def propagate_weights(self, df):
        """Widen each key frame's weight to a surrounding window.

        The n_forward_expansion frames before and n_backward_expansion
        frames after every key frame also receive the key-frame weight,
        so the actions leading into and out of a grasp are emphasised.
        Assumes ``df`` has a default RangeIndex.
        """
        new_weights = df['training_weight'].copy()
        weight_indices = df[df['training_weight'] == self.key_frame_training_weight].index

        for idx in weight_indices:
            start = max(0, idx - self.n_forward_expansion)
            end = min(len(df) - 1, idx + self.n_backward_expansion)
            # Slice assignment replaces the original per-element loops;
            # idx itself is included but already carries the weight.
            new_weights.iloc[start:end + 1] = self.key_frame_training_weight

        return new_weights

    def find_weight_intervals(self, df):
        """Find contiguous runs of frames whose weight differs from 1.

        Returns a list of ((start_time, end_time), (start_index,
        end_index), episode_index) tuples; times are offset by each
        episode's cumulative start time (see find_start_time).  Assumes
        ``df`` has a default RangeIndex (as produced by read_parquet).
        """
        intervals = []
        in_interval = False
        start_time = end_time = 0
        start_index = end_index = 0
        episode_index = 0
        start_time_lst = self.find_start_time(df)
        for i, row in df.iterrows():
            if row['training_weight'] != 1 and not in_interval:
                # A new weighted interval begins at this frame.
                in_interval = True
                start_time = row['timestamp'] + start_time_lst[row["episode_index"]]
                start_index = row['index']
            elif in_interval and (row['training_weight'] == 1 or row['episode_index'] != episode_index):
                # The interval ended at the PREVIOUS frame.  Bug fix: use
                # that frame's episode offset — when the interval is cut
                # by an episode boundary, the current row already belongs
                # to the next episode.
                in_interval = False
                prev = df.iloc[i - 1]
                end_time = prev['timestamp'] + start_time_lst[prev["episode_index"]]
                end_index = prev['index']
                intervals.append(((start_time, end_time), (start_index, end_index), episode_index))
                if row['training_weight'] != 1:
                    # Bug fix: the new episode starts weighted — open a
                    # fresh interval here instead of dropping this frame.
                    in_interval = True
                    start_time = row['timestamp'] + start_time_lst[row["episode_index"]]
                    start_index = row['index']
            episode_index = row['episode_index']

        # Close an interval that runs to the end of the dataframe.
        if in_interval:
            last = df.iloc[-1]
            end_time = last['timestamp'] + start_time_lst[last["episode_index"]]
            end_index = last['index']
            intervals.append(((start_time, end_time), (start_index, end_index), episode_index))
        return intervals

    def generate_video_clip_commands(self, df, video_file, output_dir):
        """Build ffmpeg commands that cut out every weighted interval.

        Returns a list of shell command strings; ``output_dir`` is
        created if missing.  ``-c copy`` stream-copies (no re-encode).
        """
        os.makedirs(output_dir, exist_ok=True)
        weight_intervals = self.find_weight_intervals(df)
        commands = []
        for i, ((start_time, end_time), _, episode_index) in enumerate(weight_intervals):
            duration = end_time - start_time
            output_file = os.path.join(output_dir,
                                       f"clip_{i + 1:03d}_episode{episode_index}_({start_time:.2f}-{end_time:.2f}).mp4")
            cmd = f'ffmpeg -i "{video_file}" -ss {start_time} -t {duration} -c copy "{output_file}"'
            commands.append(cmd)
        return commands

    def analyze(self, df, current_col="observation.current", state_col="observation.state", timestamp_col="timestamp"):
        """Compute the ``training_weight`` column for a dataframe.

        Intermediate helper columns are dropped before returning, so the
        only schema change is the added weight column.
        """
        df['gripper_current'] = self.extract_data(df[current_col])
        df['gripper_state'] = self.extract_data(df[state_col])
        df['velocity'] = self.calculate_gripper_velocity(df['gripper_state'], df[timestamp_col])
        df['training_weight'] = df.apply(self.calculate_training_weight, axis=1)
        # Widen isolated key frames into a window around each one.
        df['training_weight'] = self.propagate_weights(df)
        return df.drop(["gripper_current", "gripper_state", "velocity"], axis=1)

    def write_parquet(self, df, path):
        """Persist *df* (with the new weight column) back to *path*."""
        df.to_parquet(path, index=False)

    def get_clip_view(self):
        """Resolve the configured clip views against the dataset cameras.

        Returns the feature keys ("observation.images.<view>") of the
        views to clip; "all" selects every camera.  Unknown views are
        removed from ``self.clip_view`` with a warning.
        """
        all_view_names = []
        all_view_dir = []
        view_dirs = []
        for key in self.meta_info.get("features").keys():
            if "observation.images" in key:
                all_view_dir.append(key)
                all_view_names.append(key.split(".")[-1])
        if "all" in self.clip_view:
            self.clip_view = all_view_names
            view_dirs = all_view_dir
        else:
            # Bug fix: iterate over a copy — removing items from the list
            # being iterated skips the element after each removal.
            for view in list(self.clip_view):
                if view not in all_view_names:
                    self.clip_view.remove(view)
                    print(f"数据集没有{view}视角，不会生成{view}视角的clip view.")
                else:
                    view_dirs.append(f"observation.images.{view}")
        return view_dirs

    def visualize_weight(self, df):
        """Save a plot of the training-weight distribution over frames.

        Key-frame intervals are highlighted in red, episode boundaries
        drawn as dashed lines, and each episode labelled at its midpoint.
        The figure is written into the dataset directory.
        """
        plt.figure(figsize=(12, 6))
        weight_intervals = self.find_weight_intervals(df)

        legend_handles = []
        legend_labels = []

        # Bug fix: the weight curve used to be plotted twice (once in
        # black, once in the default colour); draw it exactly once.
        line_handle, = plt.plot(df['index'], df['training_weight'], 'o-',
                                markersize=1, linewidth=0.1, color="black")
        legend_handles.append(line_handle)
        legend_labels.append('training weight')

        # Highlight weighted intervals; only the first gets a legend entry.
        for i, (_, (start, end), episode_index) in enumerate(weight_intervals):
            vspan = plt.axvspan(start, end, alpha=0.2, color='red')
            if i == 0:
                legend_handles.append(vspan)
                legend_labels.append('key frame')

        unique_episodes = df['episode_index'].unique()
        unique_episodes.sort()  # keep episodes in order

        # Dashed separator just after the last index of each episode.
        for i in range(1, len(unique_episodes)):
            prev_episode_end = df[df['episode_index'] == unique_episodes[i - 1]]['index'].max()
            plt.axvline(x=prev_episode_end + 0.5, color='black', linestyle='--', linewidth=1, alpha=0.7)

        # Label each episode near the top of the axes at its midpoint.
        for episode in unique_episodes:
            episode_mid = df[df['episode_index'] == episode]['index'].mean()
            plt.text(episode_mid, plt.ylim()[1] * 0.95,
                     f'Ep{episode}',
                     ha='center', va='bottom',
                     fontsize=9, alpha=0.7,
                     bbox=dict(boxstyle="round,pad=0.3", facecolor="white", alpha=0.5))

        plt.xlabel('index')
        plt.ylabel('weight')
        plt.title('Training Weight')
        plt.yticks([1, 2])
        plt.grid(True, alpha=0.3)
        plt.legend(legend_handles, legend_labels)

        png_path = os.path.join(self.dataset_path, 'training_weights_bidirectional_distribution.png')
        plt.savefig(png_path, dpi=150)
        print(f"\n训练权重分布图已保存为 {png_path}")
        plt.close()
        return

    def modify_meta_info(self):
        """Register the ``training_weight`` feature in meta/info.json."""
        self.meta_info['features']['training_weight'] = {
            "dtype": "float32",
            "shape": [
                1
            ],
            "names": None
        }
        with open(os.path.join(self.dataset_path, "meta", "info.json"), "w", encoding="utf-8") as f:
            json.dump(self.meta_info, f, ensure_ascii=False, indent=4)

    def run_main(self):
        """Process every parquet file under <dataset>/data.

        For each file: compute training weights, plot them, cut ffmpeg
        clips for every weighted interval in the selected camera views,
        and write the weights back into the parquet file.
        """
        data_dir = os.path.join(self.dataset_path, "data")
        files = [os.path.join(root, name) for root, _, names in os.walk(data_dir) for name in names]
        view_dirs = self.get_clip_view()
        self.modify_meta_info()

        # (Re)create the clip output directory; asks before overwriting.
        clip_dir = os.path.join(self.dataset_path, "video_clips")
        if not create_or_replace_directory(clip_dir):
            return

        for file in files:
            df = pd.read_parquet(file)
            df = self.analyze(df)
            weight_intervals = self.find_weight_intervals(df)
            # e.g. ".../data/chunk-000/ep0.parquet" -> "chunk-000/ep0".
            # NOTE(review): splitting on the literal "data" misbehaves if
            # the dataset path itself contains "data" — verify upstream.
            file_name = file.split("data")[-1].split(".")[0].strip('/')
            self.visualize_weight(df)
            print(f"{file}\n权重不为1的时间段:")
            for i, ((start, end), _, episode_index) in enumerate(weight_intervals):
                print(
                    f"episode{episode_index},时间段 {i + 1}: {start:.2f}s - {end:.2f}s (持续时间: {end - start:.2f}s)")
            for view in view_dirs:
                clip_output_path = os.path.join(clip_dir, view, file_name)
                video_path = os.path.join(self.dataset_path, "videos", view, f"{file_name}.mp4")
                commands = self.generate_video_clip_commands(df, video_path, clip_output_path)
                for command in commands:
                    # NOTE(review): paths are interpolated into a shell
                    # string; acceptable for trusted local datasets, but
                    # subprocess.run([...]) would be safer for arbitrary paths.
                    os.system(command)
            self.write_parquet(df, file)
        return


@parser.wrap()
def main(cfg: CriticalFrameDetectorConfig):
    """CLI entry point: print and validate the config, then run detection."""
    # Bug fix: the pformat() result was previously computed and discarded;
    # actually print the pretty-formatted config (replaces the redundant
    # bare print(cfg)).
    print(pformat(asdict(cfg)))
    cfg.validate()
    CriticalFrameDetector(cfg)


# Script entry point: config is parsed from the command line by @parser.wrap().
if __name__ == '__main__':
    main()
