#!/usr/bin/env python

# author: wwq
"""
This script converts a local LeRobot dataset from version 2.0 to 2.1.
It generates per-episode stats, checks consistency with old stats, updates metadata,
and removes deprecated stats.json, all locally without Hugging Face Hub interaction.

Usage:
```bash
python src/lerobot/datasets/v21_local/convert_local_dataset_v20_to_v21.py \
    --data-dir=/home/zhq2004/XArm/lerobot_smolvla/datasets/pick_put_red \
    --num-workers=4
```

1. Load the dataset: open the local dataset (version 2.0) with LeRobotDataset.
2. Generate per-episode stats: compute stats for each episode and save them to episodes_stats.jsonl.
3. Validate stats consistency: compare the newly generated stats against the old stats.json.
4. Update metadata: bump codebase_version in info.json to 2.1.
5. Remove the old stats file: delete stats.json.
6. Save results: ensure all changes are persisted to the local directory.

"""

import argparse
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path

import numpy as np
from tqdm import tqdm

from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
from lerobot.datasets.utils import EPISODES_STATS_PATH, STATS_PATH, load_stats, write_info, write_episode_stats
from lerobot.datasets.compute_stats import get_feature_stats, aggregate_stats, sample_indices  # 显式导入

V20 = "v2.0"
V21 = "v2.1"


class SuppressWarnings:
    """Context manager that temporarily raises the root logger to ERROR level.

    The previous effective level is captured on entry and restored on exit,
    silencing warnings emitted while the managed block runs.
    """

    def __enter__(self):
        root = logging.getLogger()
        self.previous_level = root.getEffectiveLevel()
        root.setLevel(logging.ERROR)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore whatever level was in effect before entering the block.
        logging.getLogger().setLevel(self.previous_level)


def sample_episode_video_frames(dataset: LeRobotDataset, episode_index: int, ft_key: str) -> np.ndarray:
    """Decode a subsampled set of frames for one video feature of an episode.

    Uses `sample_indices` to pick representative frame indices within the
    episode, then queries the video backend for those frames only.
    """
    episode_length = dataset.meta.episodes[episode_index]["length"]
    frame_indices = sample_indices(episode_length)
    timestamps = dataset._get_query_timestamps(0.0, {ft_key: frame_indices})
    frames = dataset._query_videos(timestamps, episode_index)
    return frames[ft_key].numpy()


def convert_episode_stats(dataset: LeRobotDataset, ep_idx: int):
    """Compute per-feature stats for one episode and store them in dataset.meta.

    For video features, frames are subsampled and decoded instead of reading
    the full episode; visual features are reduced over batch/height/width so
    the stats are per-channel.
    """
    start = dataset.episode_data_index["from"][ep_idx]
    end = dataset.episode_data_index["to"][ep_idx]
    episode_rows = dataset.hf_dataset.select(range(start, end))

    stats = {}
    for key, ft in dataset.features.items():
        is_visual = ft["dtype"] in ["image", "video"]
        if ft["dtype"] == "video":
            data = sample_episode_video_frames(dataset, ep_idx, key)
        else:
            data = np.array(episode_rows[key])

        # Visual data: reduce over (batch, height, width), keep channel axis.
        reduce_axes = (0, 2, 3) if is_visual else 0
        keep = True if is_visual else data.ndim == 1
        stats[key] = get_feature_stats(data, axis=reduce_axes, keepdims=keep)

        if is_visual:
            # Drop the leading singleton axis kept by keepdims; "count" is scalar.
            stats[key] = {
                name: value if name == "count" else np.squeeze(value, axis=0)
                for name, value in stats[key].items()
            }

    dataset.meta.episodes_stats[ep_idx] = stats


def convert_stats(dataset: LeRobotDataset, num_workers: int = 0):
    """Compute stats for every episode (optionally in parallel), then persist them.

    With num_workers > 0, per-episode computation is fanned out to a thread
    pool; results are always written sequentially afterwards.
    """
    # The dataset must expose all episodes (no episode subset selection).
    assert dataset.episodes is None
    print("Computing episodes stats")
    n_episodes = dataset.meta.total_episodes

    if num_workers > 0:
        with ThreadPoolExecutor(max_workers=num_workers) as pool:
            pending = [
                pool.submit(convert_episode_stats, dataset, idx)
                for idx in range(n_episodes)
            ]
            for done in tqdm(as_completed(pending), total=n_episodes):
                done.result()  # re-raise any worker exception
    else:
        for idx in tqdm(range(n_episodes)):
            convert_episode_stats(dataset, idx)

    for idx in tqdm(range(n_episodes)):
        write_episode_stats(idx, dataset.meta.episodes_stats[idx], dataset.root)


def check_aggregate_stats(
    dataset: LeRobotDataset,
    reference_stats: dict[str, dict[str, np.ndarray]],
    video_rtol_atol: tuple[float, float] = (1e-2, 1e-2),
    default_rtol_atol: tuple[float, float] = (5e-6, 6e-5),
):
    """Verify stats aggregated from the new per-episode stats match the reference.

    Args:
        dataset: Dataset whose `meta.episodes_stats` were just computed.
        reference_stats: Old aggregate stats (from the deprecated stats.json),
            keyed by feature name then stat name.
        video_rtol_atol: (rtol, atol) tolerance pair for video features, looser
            because video decoding is lossy.
        default_rtol_atol: (rtol, atol) tolerance pair for all other features.

    Raises:
        AssertionError: If any aggregated stat diverges from the reference
            beyond the allowed tolerance.
    """
    agg_stats = aggregate_stats(list(dataset.meta.episodes_stats.values()))
    for key, ft in dataset.features.items():
        rtol, atol = video_rtol_atol if ft["dtype"] == "video" else default_rtol_atol
        for stat, val in agg_stats[key].items():
            # Only compare stats that exist in the reference; old stats files
            # may not contain every feature/stat produced by the new pipeline.
            if key in reference_stats and stat in reference_stats[key]:
                err_msg = f"feature='{key}' stats='{stat}'"
                np.testing.assert_allclose(
                    val, reference_stats[key][stat], rtol=rtol, atol=atol, err_msg=err_msg
                )


def convert_local_dataset(data_dir: str, num_workers: int = 4):
    """Convert a local LeRobot dataset from v2.0 to v2.1 in place.

    Generates per-episode stats, validates them against the deprecated
    aggregate stats when available, bumps the codebase version in info.json,
    and removes the deprecated stats.json.

    Args:
        data_dir: Path to the local dataset directory.
        num_workers: Thread count for parallel stats computation (0 = serial).

    Raises:
        FileNotFoundError: If `data_dir` does not exist.
    """
    data_path = Path(data_dir)
    if not data_path.exists():
        raise FileNotFoundError(f"Dataset directory {data_path} does not exist.")

    with SuppressWarnings():
        dataset = LeRobotDataset(data_dir, revision=V20)

    # Start from a clean slate: drop any previously generated per-episode stats.
    episodes_stats_path = data_path / EPISODES_STATS_PATH
    if episodes_stats_path.is_file():
        episodes_stats_path.unlink()

    # Generate new per-episode stats.
    convert_stats(dataset, num_workers=num_workers)

    # Verify against the deprecated aggregate stats, but only when stats.json
    # is actually present (it may be missing on a partially converted dataset;
    # the original code called load_stats unconditionally and failed there).
    stats_path = data_path / STATS_PATH
    if stats_path.is_file():
        ref_stats = load_stats(data_path)
        check_aggregate_stats(dataset, ref_stats)

    # Update codebase_version in info.json.
    dataset.meta.info["codebase_version"] = CODEBASE_VERSION
    write_info(dataset.meta.info, dataset.root)

    # Remove deprecated stats.json.
    if stats_path.is_file():
        stats_path.unlink()

    print(f"Dataset at {data_dir} successfully converted to version {V21}.")


if __name__ == "__main__":
    # CLI entry point: parse arguments and run the local conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data-dir",
        type=str,
        required=True,
        help="Path to the local LeRobot dataset directory (e.g., /home/zhq2004/XArm/lerobot_smolvla/datasets/pick_put_red).",
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        default=4,
        help="Number of workers for parallelizing stats computation. Defaults to 4.",
    )
    cli_args = parser.parse_args()
    convert_local_dataset(**vars(cli_args))