#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
将双臂机器人数据集 dataset-fanguo 转换为 v2.1 格式

该脚本将原始数据集转换为符合 LeRobot v2.1 规范的格式，包括：
- 创建 meta 目录和相关元数据文件
- 重组图像文件到新的目录结构
- 生成 parquet 格式的数据文件
- 计算每个 episode 的统计信息

用法:
```bash
python convert_dataset_fanguo_to_v2_1.py \
    --input-dir=/home/dw/workspaces/AGI/lerobot/dataset-fanguo \
    --output-dir=/home/dw/workspaces/AGI/lerobot/dataset-fanguo-v2.1 \
    --repo-id=your-username/dataset-fanguo-v2.1
# 如需推送到 Hub，追加 --push-to-hub 开关（该参数为 store_true，不支持 --push-to-hub=False 写法）
```
"""

import argparse
import json
import logging
import os
import shutil
import jsonlines
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm

# Constant definitions
CODEBASE_VERSION = "v2.1"
DEFAULT_CHUNK_SIZE = 1000  # Maximum number of episodes per data chunk

# Path templates, relative to the dataset root
INFO_PATH = "meta/info.json"
EPISODES_PATH = "meta/episodes.jsonl"
EPISODES_STATS_PATH = "meta/episodes_stats.jsonl"
TASKS_PATH = "meta/tasks.jsonl"
DEFAULT_IMAGE_PATH = "images/{image_key}/episode_{episode_index:06d}/frame_{frame_index:06d}.png"
DEFAULT_PARQUET_PATH = "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet"

# Default per-frame bookkeeping features shared by every episode
DEFAULT_FEATURES = {
    "timestamp": {"dtype": "float32", "shape": (1,), "names": None},
    "frame_index": {"dtype": "int64", "shape": (1,), "names": None},
    "episode_index": {"dtype": "int64", "shape": (1,), "names": None},
    "index": {"dtype": "int64", "shape": (1,), "names": None},
    "task_index": {"dtype": "int64", "shape": (1,), "names": None},
}

# 工具函数
def write_json(data: dict, fpath: Path) -> None:
    """Serialize *data* to a pretty-printed JSON file at *fpath*.

    Parent directories are created as needed. The file is written as UTF-8:
    with ``ensure_ascii=False`` the dump contains raw non-ASCII characters
    (e.g. Chinese task text), which would raise ``UnicodeEncodeError`` on
    platforms whose default file encoding is not UTF-8.
    """
    fpath.parent.mkdir(exist_ok=True, parents=True)
    with open(fpath, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=4, ensure_ascii=False)

def write_jsonlines(data: list, fpath: Path) -> None:
    """Write each item of *data* as one JSON object per line (JSON Lines).

    Parent directories are created as needed; any existing file is
    overwritten. Uses the stdlib ``json`` module (one ``json.dumps`` per
    line) instead of the third-party ``jsonlines`` package, and writes
    UTF-8 explicitly.
    """
    fpath.parent.mkdir(exist_ok=True, parents=True)
    with open(fpath, "w", encoding="utf-8") as f:
        for item in data:
            f.write(json.dumps(item) + "\n")

def append_jsonlines(data: dict, fpath: Path) -> None:
    """Append one record to a JSON Lines file (one JSON object per line).

    Parent directories are created as needed; the file is created if it
    does not exist. Uses the stdlib ``json`` module instead of the
    third-party ``jsonlines`` package, and writes UTF-8 explicitly.
    """
    fpath.parent.mkdir(exist_ok=True, parents=True)
    with open(fpath, "a", encoding="utf-8") as f:
        f.write(json.dumps(data) + "\n")

def write_info(info: dict, local_dir: Path):
    """Persist the dataset-level ``info`` dict to ``meta/info.json``."""
    target = local_dir / INFO_PATH
    write_json(info, target)

def write_episode(episode: dict, local_dir: Path):
    """Append one episode's metadata record to ``meta/episodes.jsonl``."""
    target = local_dir / EPISODES_PATH
    append_jsonlines(episode, target)

def write_task(task_index: int, task: dict, local_dir: Path):
    """Append one task record to ``meta/tasks.jsonl``.

    The task dict is stored as a JSON string so that the record's ``task``
    field is a single hashable value.

    Args:
        task_index: Integer index assigned to this task.
        task: Task description dict (goal / description / steps).
        local_dir: Root directory of the output dataset.
    """
    # ``json`` is already imported at module level; the previous
    # function-local ``import json`` was redundant and has been removed.
    task_dict = {
        "task_index": task_index,
        "task": json.dumps(task),
    }
    append_jsonlines(task_dict, local_dir / TASKS_PATH)

def ensure_ndim_at_least_1(value):
    """Normalize *value* so scalar-like inputs become length-1 lists.

    Scalars and 0-d numpy arrays are wrapped in a single-element list; an
    empty list is replaced with ``[0.0]``; a flat list is returned as-is;
    nested lists are normalized element by element. Any other value passes
    through unchanged.
    """
    def _scalar_like(v):
        # Plain Python/numpy scalars, or 0-dimensional ndarrays.
        return np.isscalar(v) or (isinstance(v, np.ndarray) and v.ndim == 0)

    if _scalar_like(value):
        return [value]
    if isinstance(value, list):
        if not value:
            return [0.0]  # empty list: substitute a default element
        if _scalar_like(value[0]):
            return value  # already a flat 1-D list
        return [ensure_ndim_at_least_1(elem) for elem in value]
    return value

def serialize_dict(stats: dict) -> dict:
    """Recursively convert numpy values in *stats* to JSON-friendly types.

    ndarrays become (nested) Python lists, numpy scalars become Python
    scalars, and every leaf is promoted to at least a one-element list via
    ``ensure_ndim_at_least_1``. Nested dicts are processed recursively.
    """
    out = {}
    for key, value in stats.items():
        if isinstance(value, dict):
            out[key] = serialize_dict(value)
            continue
        if isinstance(value, np.ndarray):
            value = value.tolist()
        elif isinstance(value, np.generic):
            value = value.item()
        out[key] = ensure_ndim_at_least_1(value)
    return out

def write_episode_stats(episode_index: int, episode_stats: dict, local_dir: Path):
    """Append one episode's serialized stats to ``meta/episodes_stats.jsonl``."""
    record = {
        "episode_index": episode_index,
        "stats": serialize_dict(episode_stats),
    }
    append_jsonlines(record, local_dir / EPISODES_STATS_PATH)

# 特征统计计算函数
def get_feature_stats(data, axis=0, keepdims=False):
    """Compute count/mean/std/min/max statistics for a feature array.

    Args:
        data: numpy array whose first dimension indexes frames.
        axis: axis (or axes) to reduce over.
        keepdims: whether reduced axes are kept with size 1.

    Returns:
        Dict with keys ``count``, ``mean``, ``std``, ``min``, ``max``.
    """
    stats = {"count": data.shape[0]}
    reducers = (("mean", np.mean), ("std", np.std), ("min", np.min), ("max", np.max))
    for name, reduce_fn in reducers:
        stats[name] = reduce_fn(data, axis=axis, keepdims=keepdims)
    return stats

# Logging setup
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# NOTE: CODEBASE_VERSION and DEFAULT_CHUNK_SIZE were previously redefined
# here with the same values as at the top of the file; the duplicate
# definitions have been removed.

# Mapping from raw camera stream names to output camera names
CAMERA_MAPPING = {
    "color_0": "head_camera",
    "color_2": "left_camera",
    "color_3": "right_camera",
}

# Mapping from raw joint group names to output joint group names
JOINT_MAPPING = {
    "left_arm": "left_arm",
    "right_arm": "right_arm",
    "left_hand": "left_gripper",
    "right_hand": "right_gripper",
}


def parse_args():
    """Build the CLI parser for the conversion script and parse ``sys.argv``."""
    parser = argparse.ArgumentParser(description="将 dataset-fanguo 转换为 v2.1 格式")
    parser.add_argument("--input-dir", type=str, required=True, help="原始数据集目录路径")
    parser.add_argument("--output-dir", type=str, required=True, help="输出数据集目录路径")
    parser.add_argument(
        "--repo-id",
        type=str,
        default=None,
        help="Hugging Face 仓库 ID，格式为 'username/dataset-name'",
    )
    # Boolean switch: present -> True, absent -> False.
    parser.add_argument("--push-to-hub", action="store_true", help="是否将转换后的数据集推送到 Hugging Face Hub")
    parser.add_argument("--num-workers", type=int, default=4, help="并行处理的工作线程数")
    return parser.parse_args()


def load_episode_data(episode_dir: Path) -> Dict[str, Any]:
    """Load one episode's raw ``data.json``.

    Args:
        episode_dir: Directory of a single raw episode.

    Returns:
        The parsed JSON content of ``<episode_dir>/data.json``.

    Raises:
        FileNotFoundError: If ``data.json`` does not exist.
    """
    data_file = episode_dir / "data.json"
    if not data_file.exists():
        raise FileNotFoundError(f"数据文件不存在: {data_file}")

    # Read explicitly as UTF-8 so non-ASCII text fields (goal/description
    # in Chinese) parse correctly regardless of the platform default encoding.
    with open(data_file, "r", encoding="utf-8") as f:
        return json.load(f)


def extract_episode_index(episode_dir: Path) -> int:
    """Parse the numeric index from a directory named ``episode_<N>``."""
    parts = episode_dir.name.split("_")
    return int(parts[1])


def create_directory_structure(output_dir: Path) -> None:
    """Create the v2.1 layout: ``meta/``, ``data/``, and one ``images/<camera>/``
    directory per output camera name."""
    targets = [output_dir / "meta", output_dir / "data"]
    targets.extend(output_dir / "images" / camera for camera in CAMERA_MAPPING.values())
    for directory in targets:
        directory.mkdir(parents=True, exist_ok=True)


def create_features_dict(episode_data: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
    """Build the v2.1 ``features`` spec.

    Combines the default bookkeeping features with one image feature per
    camera and observation/action joint-position entries for both arms and
    both grippers.

    Args:
        episode_data: Raw episode data. Currently unused (shapes below are
            hard-coded); kept for interface stability.

    Returns:
        Mapping from feature name to its dtype/shape/names spec.
    """
    features = DEFAULT_FEATURES.copy()

    # Camera image features: 3x480x640 frames with standard channel names.
    for camera_name in CAMERA_MAPPING.values():
        features[camera_name] = {
            "dtype": "image",
            "shape": (3, 480, 640),
            "names": ["c", "h", "w"],
        }

    # Joint-position features: 6-DoF arms and 1-DoF grippers, mirrored for
    # observations and actions.
    limb_dims = [
        ("left_arm", 6),
        ("right_arm", 6),
        ("left_gripper", 1),
        ("right_gripper", 1),
    ]
    for prefix in ("observation", "action"):
        for limb, dim in limb_dims:
            features[f"{prefix}.{limb}.qpos"] = {
                "dtype": "float32",
                "shape": (dim,),
                "names": None,
            }

    return features


def create_info_json(episode_data: Dict[str, Any], features: Dict[str, Dict[str, Any]], output_dir: Path) -> Dict[str, Any]:
    """Assemble the initial ``info.json`` dict for the v2.1 dataset.

    The episode/frame/task totals start at 0 and are filled in after all
    episodes have been processed. ``episode_data`` and ``output_dir`` are
    currently unused; they are kept for interface stability.
    """
    return {
        "codebase_version": CODEBASE_VERSION,
        "fps": 30,  # NOTE(review): hard-coded; presumably the recording rate — confirm
        "features": features,
        "use_videos": False,
        "robot_type": "Fangnuo",
        "total_episodes": 0,  # updated after conversion
        "total_frames": 0,  # updated after conversion
        "total_tasks": 0,  # updated after conversion
        "chunks_size": DEFAULT_CHUNK_SIZE,
        "image_path": DEFAULT_IMAGE_PATH,
        "parquet_path": DEFAULT_PARQUET_PATH,
        "data_path": DEFAULT_PARQUET_PATH,
        "video_path": "videos/{video_key}/episode_{episode_index:06d}.mp4",
    }


def process_episode(
    episode_dir: Path,
    output_dir: Path,
    episode_index: int,
    features: Dict[str, Dict[str, Any]],
    task_index: int,
) -> Tuple[Dict[str, Any], Dict[str, Any], int]:
    """Convert one raw episode: copy/convert its images into the v2.1 layout,
    write its frame table as parquet, and compute per-episode feature stats.

    Args:
        episode_dir: Directory of the raw episode (contains ``data.json``).
        output_dir: Root of the v2.1 output dataset.
        episode_index: Index of this episode within the dataset.
        features: Feature spec produced by ``create_features_dict``.
        task_index: Index of the task this episode belongs to.

    Returns:
        Tuple of (episode metadata dict, episode stats dict, frame count).
    """
    logger.info(f"处理 episode {episode_index}")

    episode_data = load_episode_data(episode_dir)

    episode_meta = {
        "episode_index": episode_index,
        "task_index": task_index,
        "length": len(episode_data["data"]),
        "fps": 30,  # NOTE(review): hard-coded; confirm against the recording rate
    }

    # Create per-camera image directories and this episode's data chunk dir.
    episode_chunk = episode_index // DEFAULT_CHUNK_SIZE
    for camera_name in CAMERA_MAPPING.values():
        (output_dir / "images" / camera_name / f"episode_{episode_index:06d}").mkdir(parents=True, exist_ok=True)
    (output_dir / "data" / f"chunk-{episode_chunk:03d}").mkdir(parents=True, exist_ok=True)

    df_data = []
    for frame_idx, frame in enumerate(episode_data["data"]):
        frame_data = {}

        # Bookkeeping columns. NOTE(review): "index" is set to the
        # per-episode frame index, but in the LeRobot v2.1 format "index" is
        # normally a global frame index across the whole dataset — confirm
        # with downstream consumers before relying on this column.
        frame_data["timestamp"] = np.array([frame["timestamp"]], dtype=np.float32)
        frame_data["frame_index"] = np.array([frame_idx], dtype=np.int64)
        frame_data["episode_index"] = np.array([episode_index], dtype=np.int64)
        frame_data["index"] = np.array([frame_idx], dtype=np.int64)
        frame_data["task_index"] = np.array([task_index], dtype=np.int64)

        # Copy images into the new layout, renaming streams per CAMERA_MAPPING.
        for old_key, new_key in CAMERA_MAPPING.items():
            if old_key in frame["colors"]:
                src_path = episode_dir / frame["colors"][old_key]
                dst_path = (
                    output_dir / "images" / new_key
                    / f"episode_{episode_index:06d}" / f"frame_{frame_idx:06d}.png"
                )

                if src_path.exists():
                    # JPEG sources (.jpg or .jpeg) are re-encoded to PNG so
                    # the content matches the ".png" destination extension;
                    # previously ".jpeg" files were copied byte-for-byte under
                    # a ".png" name. The context manager also closes the file
                    # handle that Image.open keeps open (the old code leaked it).
                    if src_path.suffix.lower() in (".jpg", ".jpeg"):
                        with Image.open(src_path) as img:
                            img.save(dst_path)
                    else:
                        shutil.copy2(src_path, dst_path)

                # Record the image path relative to the dataset root.
                frame_data[new_key] = str(dst_path.relative_to(output_dir))

        # Joint observations ("states") and actions, renamed per JOINT_MAPPING.
        for source, prefix in (("states", "observation"), ("actions", "action")):
            for old_key, new_key in JOINT_MAPPING.items():
                group = frame[source].get(old_key)
                if group and "qpos" in group:
                    frame_data[f"{prefix}.{new_key}.qpos"] = np.array(group["qpos"], dtype=np.float32)

        df_data.append(frame_data)

    df = pd.DataFrame(df_data)

    # Persist the frame table as parquet in its chunk directory.
    parquet_path = output_dir / "data" / f"chunk-{episode_chunk:03d}" / f"episode_{episode_index:06d}.parquet"
    df.to_parquet(parquet_path)

    # Per-feature statistics for meta/episodes_stats.jsonl.
    episode_stats = {}
    for key, ft in features.items():
        if key not in df.columns:
            continue
        if ft["dtype"] == "image":
            # Placeholder image statistics: computing real pixel stats would
            # require decoding every frame, so normalized [0, 1] bounds are
            # assumed here.
            episode_stats[key] = {
                "count": len(df),
                "mean": np.zeros((3, 1, 1), dtype=np.float32),
                "std": np.ones((3, 1, 1), dtype=np.float32),
                "min": np.zeros((3, 1, 1), dtype=np.float32),
                "max": np.ones((3, 1, 1), dtype=np.float32),
            }
        else:
            data = np.stack(df[key].values)
            # keepdims only matters for 1-D stacks, where it preserves a
            # length-1 stats vector instead of a bare scalar.
            episode_stats[key] = get_feature_stats(data, axis=0, keepdims=data.ndim == 1)

    return episode_meta, episode_stats, len(df_data)


def convert_dataset(args):
    """Convert the whole raw dataset under ``args.input_dir`` into the v2.1
    layout under ``args.output_dir``, optionally pushing the result to the
    Hugging Face Hub.

    Episodes are processed in parallel, but their metadata and stats records
    are written sorted by episode index so ``meta/episodes.jsonl`` and
    ``meta/episodes_stats.jsonl`` are deterministic and ordered.

    Raises:
        FileNotFoundError: If the input directory does not exist.
        ValueError: If no ``episode_*`` directories are found.
    """
    input_dir = Path(args.input_dir)
    output_dir = Path(args.output_dir)

    if not input_dir.exists():
        raise FileNotFoundError(f"输入目录不存在: {input_dir}")

    output_dir.mkdir(parents=True, exist_ok=True)
    create_directory_structure(output_dir)

    # Discover raw episode directories.
    episode_dirs = sorted([d for d in input_dir.iterdir() if d.is_dir() and d.name.startswith("episode_")])
    if not episode_dirs:
        raise ValueError(f"在 {input_dir} 中未找到 episode 目录")

    # The first episode serves as the reference for features and task text.
    first_episode_data = load_episode_data(episode_dirs[0])
    features = create_features_dict(first_episode_data)
    info = create_info_json(first_episode_data, features, output_dir)

    # Single-task dataset: task text is taken from the first episode.
    task_index = 0
    task_info = {
        "goal": first_episode_data["text"]["goal"],
        "description": first_episode_data["text"]["desc"],
        "steps": first_episode_data["text"]["steps"],
    }
    write_task(task_index, task_info, output_dir)
    info["total_tasks"] = 1

    # Process all episodes in parallel. Results are collected first instead
    # of being written inside the as_completed loop, because completion
    # order is nondeterministic and would scramble the JSONL files.
    results = {}
    with ThreadPoolExecutor(max_workers=args.num_workers) as executor:
        futures = {}
        for episode_dir in episode_dirs:
            episode_index = extract_episode_index(episode_dir)
            future = executor.submit(
                process_episode,
                episode_dir,
                output_dir,
                episode_index,
                features,
                task_index,
            )
            futures[future] = episode_index

        for future in tqdm(as_completed(futures), total=len(futures), desc="处理 episodes"):
            episode_index = futures[future]
            try:
                results[episode_index] = future.result()
            except Exception as e:
                logger.error(f"处理 episode {episode_index} 时出错: {e}")

    # Write metadata and stats in episode-index order.
    total_frames = 0
    for episode_index in sorted(results):
        episode_meta, episode_stats, num_frames = results[episode_index]
        write_episode(episode_meta, output_dir)
        write_episode_stats(episode_index, episode_stats, output_dir)
        total_frames += num_frames

    # Count only successfully converted episodes so that info.json stays
    # consistent with the files actually present on disk (previously all
    # discovered directories were counted, even if processing failed).
    info["total_episodes"] = len(results)
    info["total_frames"] = total_frames
    write_info(info, output_dir)

    logger.info(f"数据集转换完成，共 {info['total_episodes']} 个 episodes，{info['total_frames']} 帧")

    # Optionally push the converted dataset to the Hugging Face Hub.
    if args.push_to_hub and args.repo_id:
        try:
            from huggingface_hub import HfApi

            logger.info(f"正在将数据集推送到 Hugging Face Hub: {args.repo_id}")
            api = HfApi()
            api.create_repo(args.repo_id, repo_type="dataset", exist_ok=True)
            api.upload_folder(
                folder_path=str(output_dir),
                repo_id=args.repo_id,
                repo_type="dataset",
            )
            logger.info(f"数据集已成功推送到: https://huggingface.co/datasets/{args.repo_id}")
        except Exception as e:
            logger.error(f"推送到 Hub 时出错: {e}")


def main() -> None:
    """Script entry point: parse CLI arguments and run the conversion."""
    convert_dataset(parse_args())


if __name__ == "__main__":
    main()
