#!/usr/bin/env python

"""
训练奖励分类器的专用脚本
"""

import json
import logging
import os
from pathlib import Path
from omegaconf import DictConfig, OmegaConf
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.policies.reward_model.configuration_classifier import RewardClassifierConfig
from lerobot.configs.default import DatasetConfig
from lerobot.scripts.train import train
from lerobot.configs.types import NormalizationMode, PolicyFeature, FeatureType
from lerobot.common.datasets.utils import cycle
from torch.utils.data import ConcatDataset
import torch
from torchvision.transforms import functional as F
from typing import Dict, Tuple, Optional, Any, List

class CustomImageTransform:
    """Image transform that applies an optional crop followed by an optional resize.

    Intended to be passed as ``image_transforms`` to ``LeRobotDataset``; it is
    called with a single image tensor per sample.

    Args:
        crop_params_dict: Mapping of camera key -> ``(top, left, height, width)``
            crop rectangle. May be empty/None to disable cropping.
        resize_size: Target ``(height, width)`` for resizing, or None to skip.
    """

    def __init__(self, crop_params_dict: Optional[Dict[str, Tuple[int, int, int, int]]] = None,
                 resize_size: Optional[Tuple[int, int]] = None):
        self.crop_params_dict = crop_params_dict or {}
        self.resize_size = resize_size
        self.logger = logging.getLogger(__name__)

        # Log the configured transform parameters once at construction time.
        if self.crop_params_dict:
            self.logger.info(f"初始化图像剪切变换: {self.crop_params_dict}")
        if self.resize_size:
            self.logger.info(f"初始化图像缩放变换: {self.resize_size}")

    def __call__(self, image: torch.Tensor, key: Optional[str] = None) -> torch.Tensor:
        """
        Apply the configured crop and resize to a single image.

        Args:
            image: Input image tensor (C, H, W). Assumed to be a float image
                whose valid pixel range is [0, 1] (see the final clamp) —
                TODO confirm against the dataset's image dtype.
            key: Optional camera key used to look up crop parameters in
                ``crop_params_dict``. When None (the default, and the only way
                ``LeRobotDataset`` calls this), falls back to the first entry
                whose key contains "side" — the original heuristic.

        Returns:
            The transformed image tensor, clamped to [0, 1].
        """
        if self.crop_params_dict:
            # Prefer an exact key match; otherwise fall back to the legacy
            # heuristic of picking the first "side" camera entry.
            params = self.crop_params_dict.get(key) if key is not None else None
            if params is None:
                for cam_key, cam_params in self.crop_params_dict.items():
                    if "side" in cam_key:
                        params = cam_params
                        break
            if params is not None:
                top, left, height, width = params
                image = F.crop(image, top, left, height, width)

        # Resize after cropping so the crop rectangle refers to original pixels.
        if self.resize_size:
            image = F.resize(image, self.resize_size)

        # Keep pixel values in [0, 1].
        return image.clamp(0, 1)

def create_image_transforms(crop_params_dict: Optional[Dict[str, Tuple[int, int, int, int]]] = None,
                          resize_size: Optional[Tuple[int, int]] = None):
    """
    Factory for the dataset image transform.

    Args:
        crop_params_dict: Crop parameters keyed by camera, each value being
            ``(top, left, height, width)``.
        resize_size: Target size as ``(height, width)``.

    Returns:
        A ``CustomImageTransform`` when any preprocessing is requested,
        otherwise ``None`` (no transform needed).
    """
    # Guard clause: nothing to do when neither crop nor resize is configured.
    if not crop_params_dict and not resize_size:
        return None
    return CustomImageTransform(crop_params_dict, resize_size)

def create_combined_dataset(dataset_paths: List[str], config_dict: dict):
    """
    Build one dataset-config dict per dataset path.

    Each entry is a shallow copy of ``config_dict["dataset"]`` with its
    ``root`` replaced by the corresponding path; the input config is not
    mutated.

    Args:
        dataset_paths: Dataset root directories.
        config_dict: Full configuration containing a ``"dataset"`` section.

    Returns:
        List of per-dataset configuration dicts, in input order.
    """
    logger = logging.getLogger(__name__)

    base_config = config_dict["dataset"]
    dataset_configs = []
    for idx, path in enumerate(dataset_paths, start=1):
        logger.info(f"加载数据集 {idx}: {path}")

        # Copy the shared config and point it at this dataset's root.
        per_dataset = base_config.copy()
        per_dataset["root"] = path
        dataset_configs.append(per_dataset)

    return dataset_configs


def train_with_multiple_datasets(config_dict: dict, dataset_paths: List[str]):
    """
    Train a reward classifier over several LeRobot datasets merged together.

    Builds one ``LeRobotDataset`` per path (with the configured image
    transforms), concatenates and shuffles them in memory, then launches the
    LeRobot training script as a subprocess pointed at a temporary directory
    containing a file-level copy of all dataset roots.

    NOTE(review): the in-memory shuffled dataset built here is only used to
    smoke-test the image transforms and as a return value — the subprocess
    trains on the temp-directory copy, not on this object. Verify that this
    is the intended split of responsibilities.

    Args:
        config_dict: Parsed training configuration (from the JSON config file),
            with "dataset", "policy", and optional "eval"/"wandb" sections.
        dataset_paths: Roots of the datasets to merge.

    Returns:
        Tuple ``(shuffled_dataset, cfg)``: the shuffled in-memory dataset and
        the assembled OmegaConf training config.

    Raises:
        RuntimeError: If the training subprocess exits with a non-zero code.
    """
    logger = logging.getLogger(__name__)
    
    try:
        # NOTE: tempfile/shutil/Path are imported again further below; the
        # duplicates are harmless but redundant.
        from lerobot.configs.default import DatasetConfig, EvalConfig, WandBConfig
        from lerobot.common.policies.reward_model.configuration_classifier import RewardClassifierConfig
        from lerobot.configs.types import NormalizationMode
        from torch.utils.data import ConcatDataset, Subset, DataLoader
        import random
        import tempfile
        import shutil
        from pathlib import Path
        
        # Extract image preprocessing parameters (DatasetConfig does not accept them).
        base_dataset_config_dict = config_dict["dataset"].copy()
        crop_params_dict = base_dataset_config_dict.pop("crop_params_dict", None)
        resize_size = base_dataset_config_dict.pop("resize_size", None)
        
        # Log the image preprocessing parameters.
        if crop_params_dict:
            logger.info(f"图像剪切参数: {crop_params_dict}")
        if resize_size:
            logger.info(f"图像缩放尺寸: {resize_size}")
        
        # Build the custom image transform callable (resize_size may come from
        # JSON as a list, hence the tuple() conversion).
        image_transforms = create_image_transforms(crop_params_dict, tuple(resize_size) if resize_size else None)
        
        # Create one LeRobotDataset per path.
        all_datasets = []
        for i, dataset_path in enumerate(dataset_paths):
            logger.info(f"创建数据集，路径: {dataset_path}")
            
            # Per-dataset config: shared base with this dataset's root.
            dataset_config_dict = base_dataset_config_dict.copy()
            dataset_config_dict["root"] = dataset_path
            dataset_config = DatasetConfig(**dataset_config_dict)
            
            # Create the LeRobotDataset with the image transform attached.
            dataset = LeRobotDataset(
                repo_id=dataset_config.repo_id,
                root=dataset_config.root,
                episodes=dataset_config.episodes,
                image_transforms=image_transforms,
                revision=dataset_config.revision,
                video_backend=dataset_config.video_backend
            )
            
            logger.info(f"数据集 {i+1} 包含 {len(dataset)} 个样本")
            all_datasets.append(dataset)
        
        # Concatenate all datasets (skip the wrapper for a single dataset).
        if len(all_datasets) > 1:
            combined_dataset = ConcatDataset(all_datasets)
            logger.info(f"合并后的数据集包含 {len(combined_dataset)} 个样本")
        else:
            combined_dataset = all_datasets[0]
            logger.info(f"使用单个数据集，包含 {len(combined_dataset)} 个样本")
        
        # Build a randomly shuffled index list.
        # NOTE(review): random.shuffle is not seeded here, so the ordering is
        # not reproducible even when cfg.seed is set — confirm intended.
        indices = list(range(len(combined_dataset)))
        random.shuffle(indices)
        logger.info(f"已随机打乱数据集索引")
        
        # Wrap the concatenation in a shuffled Subset view.
        shuffled_dataset = Subset(combined_dataset, indices)
        logger.info(f"创建随机打乱的数据集子集，包含 {len(shuffled_dataset)} 个样本")
        
        # Use the first dataset's config as the base config for the CLI call below.
        dataset_config = DatasetConfig(**{**base_dataset_config_dict, "root": dataset_paths[0]})
        
        # Fix normalization-mapping types: JSON yields plain strings, the
        # policy config expects NormalizationMode enum members.
        policy_dict = config_dict["policy"].copy()
        if "normalization_mapping" in policy_dict:
            norm_mapping = {}
            for key, value in policy_dict["normalization_mapping"].items():
                if isinstance(value, str):
                    norm_mapping[key] = NormalizationMode(value)
                else:
                    norm_mapping[key] = value
            policy_dict["normalization_mapping"] = norm_mapping
        
        # Convert input_features dicts into PolicyFeature objects.
        if "input_features" in policy_dict:
            input_features = {}
            for key, feature_dict in policy_dict["input_features"].items():
                if isinstance(feature_dict, dict):
                    feature_type = FeatureType(feature_dict["type"])
                    shape = tuple(feature_dict["shape"])
                    input_features[key] = PolicyFeature(type=feature_type, shape=shape)
                else:
                    input_features[key] = feature_dict
            policy_dict["input_features"] = input_features
        
        # Convert output_features dicts into PolicyFeature objects.
        if "output_features" in policy_dict:
            output_features = {}
            for key, feature_dict in policy_dict["output_features"].items():
                if isinstance(feature_dict, dict) and "shape" in feature_dict:
                    # Only the shape is used for output features here.
                    shape = tuple(feature_dict["shape"])
                    # Assumes output features are ACTION-typed — TODO confirm.
                    output_features[key] = PolicyFeature(type=FeatureType.ACTION, shape=shape)
                else:
                    output_features[key] = feature_dict
            policy_dict["output_features"] = output_features
        
        # Build the policy config.
        policy_config = RewardClassifierConfig(**policy_dict)
        
        # Build the eval config.
        eval_config = EvalConfig(**config_dict.get("eval", {}))
        
        # Build the wandb config.
        wandb_config = WandBConfig(**config_dict.get("wandb", {}))
        
        # Assemble the remaining top-level training options.
        train_config = {
            k: v for k, v in config_dict.items() 
            if k not in ["dataset", "policy", "eval", "wandb"]
        }
        train_config["policy"] = policy_config
        train_config["eval"] = eval_config
        train_config["wandb"] = wandb_config
        
        # Create the OmegaConf config object.
        cfg = OmegaConf.create(train_config)
        
        # Log a configuration summary.
        logger.info("配置加载成功")
        logger.info(f"使用 {len(dataset_paths)} 个数据集进行训练")
        for i, path in enumerate(dataset_paths):
            logger.info(f"  数据集 {i+1}: {path}")
        logger.info(f"策略名称: {cfg.policy.name}")
        logger.info(f"设备: {cfg.policy.device}")
        logger.info(f"批次大小: {cfg.batch_size}")
        logger.info(f"训练步数: {cfg.steps}")
        
        # Use a timestamped output directory to avoid collisions between runs.
        import datetime
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        unique_output_dir = f"outputs/reward_classifier_{timestamp}"
        cfg.output_dir = unique_output_dir
        
        if cfg.output_dir:
            Path(cfg.output_dir).mkdir(parents=True, exist_ok=True)
            logger.info(f"输出目录: {cfg.output_dir}")
        
        # Smoke-test the image transform on one sample.
        if image_transforms:
            logger.info("测试图像变换功能...")
            try:
                # Grab the first sample and run the transform on its first
                # 3-D image tensor.
                sample = shuffled_dataset[0]
                for key, value in sample.items():
                    if "image" in key and torch.is_tensor(value) and len(value.shape) == 3:
                        logger.info(f"原始图像 {key} 尺寸: {value.shape}")
                        transformed = image_transforms(value)
                        logger.info(f"变换后图像 {key} 尺寸: {transformed.shape}")
                        break
            except Exception as e:
                logger.warning(f"图像变换测试失败: {e}")
        
        # Train on the merged, shuffled data.
        logger.info("开始训练，使用合并的随机打乱数据集...")
        
        # Launch training.
        logger.info("启动训练...")
        
        # Re-imports of modules already imported above (harmless duplication).
        import tempfile
        import shutil
        from pathlib import Path
        
        # Temporary directory that will hold the merged dataset files.
        temp_dir = Path(tempfile.mkdtemp(prefix="combined_dataset_"))
        logger.info(f"创建临时合并数据集目录: {temp_dir}")
        
        # Copy every dataset into the temp directory.
        # NOTE(review): files sharing the same relative path across datasets
        # overwrite each other (later datasets win) — e.g. metadata or
        # identically-numbered episode files. Verify this file-level merge
        # actually produces a valid combined LeRobot dataset.
        for i, dataset_path in enumerate(dataset_paths):
            src_path = Path(dataset_path)
            # Copy all regular files, preserving the relative layout.
            for file_path in src_path.glob("**/*"):
                if file_path.is_file():
                    # Path relative to this dataset's root.
                    rel_path = file_path.relative_to(src_path)
                    # Destination inside the temp directory.
                    dst_path = temp_dir / rel_path
                    # Ensure the parent directory exists.
                    dst_path.parent.mkdir(parents=True, exist_ok=True)
                    # Copy the file (copy2 preserves metadata).
                    shutil.copy2(file_path, dst_path)
        
        logger.info(f"已将所有数据集合并到临时目录: {temp_dir}")
        
        # Launch training via the LeRobot CLI entry point.
        import subprocess
        import sys
        
        logger.info("使用LeRobot训练脚本启动训练...")
        
        # Build the command line; the temp merged directory is the dataset root.
        cmd = [
            sys.executable, "-m", "lerobot.scripts.train",
            "--dataset.repo_id", dataset_config.repo_id,
            "--dataset.root", str(temp_dir),  # use the temporary merged directory
            "--policy.type", "reward_classifier",
            "--policy.name", cfg.policy.name,
            "--policy.device", cfg.policy.device,
            "--batch_size", str(cfg.batch_size),
            "--steps", str(cfg.steps),
            "--output_dir", cfg.output_dir,
            "--policy.num_classes", str(cfg.policy.num_classes),
            "--policy.hidden_dim", str(cfg.policy.hidden_dim),
            "--policy.learning_rate", str(cfg.policy.learning_rate),
            "--policy.weight_decay", str(cfg.policy.weight_decay),
            "--policy.num_cameras", "1",  # explicitly use a single camera
        ]
        
        # Optional arguments.
        if hasattr(cfg, 'seed') and cfg.seed is not None:
            cmd.extend(["--seed", str(cfg.seed)])
        
        logger.info(f"运行命令: {' '.join(cmd)}")
        
        # Run the training command (output is streamed, not captured).
        result = subprocess.run(cmd, cwd=os.getcwd(), capture_output=False)
        
        # Clean up the temp directory regardless of training outcome.
        try:
            shutil.rmtree(temp_dir)
            logger.info(f"已清理临时合并数据集目录: {temp_dir}")
        except Exception as e:
            logger.warning(f"清理临时目录失败: {e}")
        
        if result.returncode == 0:
            logger.info("训练完成！")
        else:
            logger.error(f"训练失败，退出码: {result.returncode}")
            raise RuntimeError(f"训练进程失败")
        
        return shuffled_dataset, cfg
        
    except Exception as e:
        logger.error(f"训练过程中出现错误: {e}")
        raise


def main():
    """Entry point: load the JSON config, validate the datasets, run training."""

    # Configure logging for the whole script.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # The two dataset roots that will be merged for training.
    dataset_paths = [
        "/home/dw/workspaces/fangnuo/lerobot-hilserl/datasets/pick_lift_cube-failed",
        "/home/dw/workspaces/fangnuo/lerobot-hilserl/datasets/pick_lift_cube01",
    ]

    # Location of the training configuration file.
    config_path = "/home/dw/workspaces/fangnuo/lerobot-hilserl/lerobot/configs/reward_classifier_train_config.json"

    # Bail out early when the config file is missing.
    if not os.path.exists(config_path):
        logger.error(f"配置文件不存在: {config_path}")
        return

    # Load the JSON configuration.
    logger.info(f"加载配置文件: {config_path}")
    with open(config_path, 'r', encoding='utf-8') as f:
        config_dict = json.load(f)

    # Announce which datasets will be used.
    logger.info("将使用以下数据集进行训练:")
    for idx, path in enumerate(dataset_paths, 1):
        logger.info(f"  数据集 {idx}: {path}")

    # Abort if any dataset root does not exist on disk.
    for idx, path in enumerate(dataset_paths, 1):
        if not Path(path).exists():
            logger.error(f"数据集 {idx} 路径不存在: {path}")
            return

    try:
        # Run the multi-dataset training pipeline.
        logger.info("开始多数据集训练...")
        dataset, cfg = train_with_multiple_datasets(config_dict, dataset_paths)

        logger.info("训练完成！")
        logger.info("图像剪切和缩放参数已成功应用到数据集")
        logger.info(f"数据集包含 {len(dataset)} 个样本")

        # Sanity-check that the image transforms took effect by inspecting the
        # first image tensor in the first sample.
        logger.info("验证图像变换效果...")
        sample = dataset[0]
        for key, value in sample.items():
            if "image" in key and torch.is_tensor(value):
                logger.info(f"处理后的图像 {key} 最终尺寸: {value.shape}")
                break

        logger.info("图像剪切和缩放功能实现完成！")

    except Exception as e:
        logger.error(f"训练过程中出现错误: {e}")
        import traceback
        traceback.print_exc()
        raise


# Script entry point.
if __name__ == "__main__":
    main()