#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
SmolVLA 数据集包装器
将 LeRobotDataset 转换为 SmolVLA 训练所需的格式
"""

import os
import logging
import torch
import numpy as np
from typing import Dict, List, Optional, Tuple, Union, Any
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import pickle
import hashlib

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.policies.smolvla.configuration_smolvla import SmolVLAConfig
from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy, pad_vector, resize_with_pad

# Constant definitions
OBS_STATE = "observation.state"  # sample key for the concatenated, padded state vector
ACTION = "action"  # sample key for the concatenated, padded action vector
DEFAULT_MAX_STATE_DIM = 32  # default padded length of the state vector
DEFAULT_MAX_ACTION_DIM = 32  # default padded length of the action vector


class SmolVLADataset(Dataset):
    """Dataset wrapper that adapts a ``LeRobotDataset`` for SmolVLA training.

    The wrapper:
      * converts image fields (stored as file paths or array-likes) into
        ``torch.Tensor`` images in (C, H, W) layout scaled to [0, 1];
      * concatenates the per-arm / per-gripper qpos fields into a single
        zero-padded state vector (``observation.state``) and action vector
        (``action``);
      * optionally preprocesses every sample once and pickles the results
        into an on-disk cache indexed by byte offset, so ``__getitem__`` can
        seek-and-load a single sample lazily.
    """

    # Image fields this wrapper knows how to convert.  The side cameras are
    # only handled when ``use_empty_cameras`` is enabled.
    _BASE_IMAGE_FIELDS = ("head_camera",)
    _EXTRA_IMAGE_FIELDS = ("left_camera", "right_camera")

    def __init__(
        self,
        dataset: LeRobotDataset,
        config: Optional[SmolVLAConfig] = None,
        max_state_dim: int = DEFAULT_MAX_STATE_DIM,
        max_action_dim: int = DEFAULT_MAX_ACTION_DIM,
        image_size: Tuple[int, int] = (512, 512),
        use_empty_cameras: bool = False,
        chunk_size: int = 50,
        n_obs_steps: int = 1,
        n_action_steps: int = 50,
        use_cache: bool = True,
        cache_dir: Optional[str] = None,
    ):
        """Initialize the wrapper.

        Args:
            dataset: the ``LeRobotDataset`` instance to wrap.
            config: optional ``SmolVLAConfig``; when provided, its values take
                precedence over the corresponding keyword arguments below.
            max_state_dim: maximum (padded) length of the state vector.
            max_action_dim: maximum (padded) length of the action vector.
            image_size: target image size as ``(height, width)``.
            use_empty_cameras: whether to also handle the side cameras
                (``left_camera`` / ``right_camera``).
            chunk_size: chunk size (stored; not consumed inside this class).
            n_obs_steps: number of observation steps (stored; unused here).
            n_action_steps: number of action steps (stored; unused here).
            use_cache: whether to build/use the on-disk preprocessing cache.
            cache_dir: cache directory; defaults to ``<root>/.smolvla_cache``.
        """
        self.dataset = dataset
        # A config, when provided, overrides the individual keyword arguments.
        self.max_state_dim = max_state_dim if config is None else config.max_state_dim
        self.max_action_dim = max_action_dim if config is None else config.max_action_dim
        self.image_size = image_size if config is None else config.resize_imgs_with_padding
        self.use_empty_cameras = use_empty_cameras if config is None else (config.empty_cameras > 0)
        self.chunk_size = chunk_size if config is None else config.chunk_size
        self.n_obs_steps = n_obs_steps if config is None else config.n_obs_steps
        self.n_action_steps = n_action_steps if config is None else config.n_action_steps
        self.use_cache = use_cache

        # Cache directory defaults to a hidden folder inside the dataset root.
        if cache_dir is None:
            cache_dir = os.path.join(str(dataset.root), '.smolvla_cache')
        self.cache_dir = cache_dir
        os.makedirs(self.cache_dir, exist_ok=True)

        # Image transform: resize-with-pad, then map [0, 1] -> [-1, 1].
        # NOTE(review): this transform is built but never applied anywhere in
        # this class -- confirm whether external callers use it or it is dead code.
        self.image_transforms = transforms.Compose([
            transforms.Lambda(lambda x: resize_with_pad(x, self.image_size[1], self.image_size[0])),
            transforms.Lambda(lambda x: x * 2.0 - 1.0),
        ])

        # Cache bookkeeping (populated by _load_or_create_cache_index).
        self.preprocessed_data = None
        self.sample_index = None  # list of {'position', 'size', 'idx'} records into the cache file
        self.cache_path = self._get_cache_path() if self.use_cache else None

        if self.use_cache:
            self._load_or_create_cache_index()

        # Sanity-check that the wrapped dataset exposes the expected fields.
        self._check_dataset_fields()
        # Use logging (as everywhere else in this module) instead of a bare print.
        logging.info("_check_dataset_fields 数据集检查完成")

    def _image_fields(self):
        """Return the names of the image fields handled by this wrapper."""
        fields = list(self._BASE_IMAGE_FIELDS)
        if self.use_empty_cameras:
            fields.extend(self._EXTRA_IMAGE_FIELDS)
        return fields

    def _generate_cache_key(self):
        """Build a cache key from the wrapper parameters and dataset identity.

        Two wrappers constructed with the same parameters over the same
        dataset (same root and length) share the same cache file.
        """
        config_str = f"{self.max_state_dim}_{self.max_action_dim}_{self.image_size}_{self.use_empty_cameras}_{self.chunk_size}_{self.n_obs_steps}_{self.n_action_steps}"
        dataset_info = f"{str(self.dataset.root)}_{len(self.dataset)}"
        # md5 is fine here: the key only needs to be unique, not secure.
        return hashlib.md5((config_str + dataset_info).encode()).hexdigest()

    def _get_cache_path(self):
        """Return the path of the pickled cache file for this configuration."""
        cache_key = self._generate_cache_key()
        return os.path.join(self.cache_dir, f"preprocessed_{cache_key}.pkl")

    def _load_or_create_cache_index(self):
        """Load the cache index if present, otherwise preprocess and create it."""
        logging.info("_load_or_create_cache_index 开始加载或创建缓存索引...")
        index_path = self.cache_path + '.index'

        # Both the data file and its index must exist for the cache to be usable.
        if os.path.exists(index_path) and os.path.exists(self.cache_path):
            logging.info(f"加载缓存索引: {index_path}")
            try:
                with open(index_path, 'rb') as f:
                    self.sample_index = pickle.load(f)
                logging.info("缓存索引加载成功，包含 {} 个样本".format(len(self.sample_index)))
                return
            except Exception as e:
                logging.warning(f"加载缓存索引失败: {e}，将重新创建缓存")

        # Cache missing or unreadable: preprocess the dataset and (re)build it.
        logging.info("开始预处理数据集并创建缓存...")
        self._preprocess_and_save_cache_index(self.cache_path, index_path)

    def _preprocess_and_save_cache_index(self, cache_path, index_path):
        """Preprocess every sample into ``cache_path`` and save an offset index.

        Samples are pickled back to back; ``self.sample_index`` records each
        sample's byte offset and size so ``__getitem__`` can seek-and-load a
        single sample without reading the whole file.
        """
        self.sample_index = []
        # ``with`` guarantees the cache file is closed even if preprocessing
        # raises part-way through (a plain open/close would leak the handle).
        with open(cache_path, 'wb') as cache_file:
            for idx in range(len(self.dataset)):
                if idx % 100 == 0:
                    logging.info(f"预处理进度: {idx}/{len(self.dataset)}")

                processed_sample = self._build_processed_sample(idx)

                # Remember where this sample starts in the cache file.
                position = cache_file.tell()
                pickle.dump(processed_sample, cache_file)
                self.sample_index.append({
                    'position': position,
                    'size': cache_file.tell() - position,
                    'idx': idx,
                })

        logging.info(f"保存缓存索引: {index_path}")
        try:
            with open(index_path, 'wb') as f:
                pickle.dump(self.sample_index, f)
            logging.info("缓存索引保存成功，包含 {} 个样本".format(len(self.sample_index)))
        except Exception as e:
            logging.error(f"保存缓存索引失败: {e}")

    def _convert_image_fields(self, example):
        """Convert every known image field of ``example`` to a float tensor.

        Handles three cases per field: a relative/absolute file path (decoded
        with PIL), an array-like (converted in place), or an existing tensor
        (left untouched).  On any failure a black (3, 480, 640) image is
        substituted so training can continue.
        """
        for field in self._image_fields():
            if field in example and isinstance(example[field], str):
                try:
                    # Resolve relative paths against the dataset root.
                    img_path = example[field]
                    if not os.path.isabs(img_path):
                        img_path = os.path.join(str(self.dataset.root), img_path)

                    if os.path.exists(img_path):
                        from PIL import Image

                        img = Image.open(img_path).convert("RGB")
                        # (H, W, C) uint8 -> (C, H, W) float32 in [0, 1].
                        img_array = np.array(img).astype(np.float32) / 255.0
                        img_array = np.transpose(img_array, (2, 0, 1))
                        example[field] = torch.from_numpy(img_array).float()
                        logging.debug(f"成功加载图像: {field} 从路径: {img_path}")
                    else:
                        logging.warning(f"图像文件不存在: {img_path}")
                        example[field] = torch.zeros((3, 480, 640), dtype=torch.float32)
                except Exception as e:
                    logging.warning(f"加载图像 {field} 时出错: {e}")
                    example[field] = torch.zeros((3, 480, 640), dtype=torch.float32)
            elif field in example and hasattr(example[field], '__array__') and not isinstance(example[field], torch.Tensor):
                # Array-like (e.g. numpy) -> tensor.
                example[field] = torch.from_numpy(np.array(example[field])).float()
        return example

    def _preprocess_sample_images(self, sample):
        """Return a shallow copy of ``sample`` with its image fields converted."""
        return self._convert_image_fields(sample.copy())

    def _build_processed_sample(self, idx):
        """Fetch raw sample ``idx`` and convert it into the SmolVLA format."""
        sample = self._preprocess_sample_images(self.dataset[idx])

        # Expose the dataset root so the collate function can resolve any
        # image fields that are still relative paths.
        if hasattr(self.dataset, 'root'):
            sample['dataset_root'] = self.dataset.root

        # Concatenated, zero-padded state and action vectors.
        sample[OBS_STATE] = self._create_observation_state(sample)
        sample[ACTION] = self._create_action(sample)
        return sample

    def _check_dataset_fields(self):
        """Warn about any expected field that the wrapped dataset is missing."""
        # Inspect the first sample as a representative of the schema.
        sample = self.dataset[0]

        # Image fields.
        if "head_camera" not in sample:
            logging.warning("数据集中缺少 'head_camera' 字段")

        if self.use_empty_cameras:
            if "left_camera" not in sample:
                logging.warning("数据集中缺少 'left_camera' 字段，但 use_empty_cameras=True")
            if "right_camera" not in sample:
                logging.warning("数据集中缺少 'right_camera' 字段，但 use_empty_cameras=True")

        # Observation fields.
        obs_fields = [
            "observation.left_arm.qpos",
            "observation.right_arm.qpos",
            "observation.left_gripper.qpos",
            "observation.right_gripper.qpos"
        ]
        missing_obs = [field for field in obs_fields if field not in sample]
        if missing_obs:
            logging.warning(f"数据集中缺少以下观测字段: {missing_obs}")

        # Action fields.
        action_fields = [
            "action.left_arm.qpos",
            "action.right_arm.qpos",
            "action.left_gripper.qpos",
            "action.right_gripper.qpos"
        ]
        missing_action = [field for field in action_fields if field not in sample]
        if missing_action:
            logging.warning(f"数据集中缺少以下动作字段: {missing_action}")

        # Task (language instruction) field.
        if "task" not in sample:
            logging.warning("数据集中缺少 'task' 字段")

    def _preprocess_images(self):
        """Eagerly convert image paths to tensors across the whole HF dataset.

        Unlike the lazy per-sample path, this rewrites ``hf_dataset`` in one
        pass via its ``map`` method, using the same conversion as
        ``_preprocess_sample_images``.
        """
        logging.info("预处理数据集中的图像路径...")
        self.dataset.hf_dataset = self.dataset.hf_dataset.map(self._convert_image_fields)
        logging.info("图像预处理完成")

    def __len__(self):
        """Number of samples, delegated to the wrapped dataset."""
        return len(self.dataset)

    def __getitem__(self, idx):
        """Return sample ``idx`` in the SmolVLA format.

        When the on-disk cache is available the sample is seek-loaded from
        the cache file; otherwise (or if the cached read fails) it is rebuilt
        on the fly from the wrapped dataset.

        Args:
            idx: sample index.

        Returns:
            dict: sample containing the fields SmolVLA training expects.
        """
        if self.use_cache and self.sample_index is not None:
            try:
                sample_info = self.sample_index[idx]
                # Open, seek to the recorded offset and unpickle just this sample.
                with open(self.cache_path, 'rb') as f:
                    f.seek(sample_info['position'])
                    return pickle.load(f)
            except Exception as e:
                logging.warning(f"从缓存加载样本 {idx} 失败: {e}，将使用原始处理方式")

        # Cache unavailable or unreadable: process the raw sample directly.
        return self._build_processed_sample(idx)

    def _assemble_padded_vector(self, sample, fields, dims, max_dim):
        """Concatenate ``fields`` from ``sample`` and zero-pad to ``max_dim``.

        A missing field is replaced by zeros of the corresponding length in
        ``dims``, allocated on the same device as the head-camera image.
        """
        parts = []
        for field, dim in zip(fields, dims):
            if field in sample:
                parts.append(sample[field])
            else:
                parts.append(torch.zeros(dim, device=sample["head_camera"].device))
        return pad_vector(torch.cat(parts, dim=0), max_dim)

    def _create_observation_state(self, sample):
        """Build the padded observation-state vector for one sample.

        Concatenation order: left arm (6) + left gripper (1) +
        right arm (6) + right gripper (1).

        Returns:
            torch.Tensor: state vector padded to ``self.max_state_dim``.
        """
        fields = [
            "observation.left_arm.qpos",
            "observation.left_gripper.qpos",
            "observation.right_arm.qpos",
            "observation.right_gripper.qpos",
        ]
        return self._assemble_padded_vector(sample, fields, (6, 1, 6, 1), self.max_state_dim)

    def _create_action(self, sample):
        """Build the padded action vector for one sample.

        Concatenation order: left arm (6) + left gripper (1) +
        right arm (6) + right gripper (1).

        Returns:
            torch.Tensor: action vector padded to ``self.max_action_dim``.
        """
        fields = [
            "action.left_arm.qpos",
            "action.left_gripper.qpos",
            "action.right_arm.qpos",
            "action.right_gripper.qpos",
        ]
        return self._assemble_padded_vector(sample, fields, (6, 1, 6, 1), self.max_action_dim)


def smolvla_collate_fn(batch, chunk_size=50, n_obs_steps=1, n_action_steps=50):
    """Collate a list of SmolVLA samples into a single batch dict.

    Images are stacked per camera key (decoding any samples that still carry
    a path string), task strings are collected into a list, and the state /
    action vectors are stacked into (B, D) tensors.

    Args:
        batch: list of sample dicts.
        chunk_size: accepted for API compatibility; currently unused.
        n_obs_steps: accepted for API compatibility; currently unused.
        n_action_steps: accepted for API compatibility; currently unused.

    Returns:
        dict: batched data; an empty dict for an empty batch.
    """
    if not batch:
        return {}

    def _black_image():
        # Fallback used when an image is missing or fails to load.
        return torch.zeros((3, 480, 640), dtype=torch.float32)

    def _to_image_tensor(sample, key):
        """Pass tensors through; decode path strings (non-cache mode)."""
        value = sample[key]
        if not isinstance(value, str):
            # Already a tensor (cache mode).
            return value
        try:
            from PIL import Image

            # Prefer the full path rooted at the dataset directory.
            if 'dataset_root' in sample:
                img_path = os.path.join(sample['dataset_root'], value)
            else:
                img_path = value
            img = Image.open(img_path).convert("RGB")
            # (H, W, C) uint8 -> (C, H, W) float32 in [0, 1].
            img_array = np.array(img).astype(np.float32) / 255.0
            return torch.from_numpy(img_array.transpose(2, 0, 1))
        except Exception as e:
            logging.warning(f"在 collate_fn 中加载图像 {key} 时出错: {e}")
            return _black_image()

    result = {}

    # Stack image tensors per camera, in a fixed key order.
    image_keys = ["head_camera", "left_camera", "right_camera"]
    for key in image_keys:
        if key in batch[0]:
            processed_images = [
                _to_image_tensor(sample, key) if key in sample else _black_image()
                for sample in batch
            ]
            result[key] = torch.stack(processed_images)

    # Task text is kept as a plain list of strings.
    if "task" in batch[0]:
        result["task"] = [sample["task"] for sample in batch]

    # Observation state -> (B, max_state_dim).
    if OBS_STATE in batch[0]:
        result[OBS_STATE] = torch.stack([sample[OBS_STATE] for sample in batch])

    # Action -> (B, max_action_dim).
    if ACTION in batch[0]:
        result[ACTION] = torch.stack([sample[ACTION] for sample in batch])

    return result


def create_smolvla_dataloader(
    dataset_path: str,
    config: Optional[SmolVLAConfig] = None,
    batch_size: int = 8,
    shuffle: bool = True,
    num_workers: int = 4,
    pin_memory: bool = True,
    local_only: bool = False,
    use_cache: bool = True,
    cache_dir: Optional[str] = None,
    **kwargs
):
    """Create a SmolVLA data loader over a local LeRobot dataset.

    Args:
        dataset_path: path to the dataset directory.
        config: SmolVLA configuration; when None, defaults are used.
        batch_size: batch size.
        shuffle: whether to shuffle the data.
        num_workers: number of data-loading worker processes.
        pin_memory: whether to pin batches in page-locked memory.
        local_only: forwarded to ``LeRobotDataset``; skip remote lookups.
        use_cache: whether to use the preprocessing cache.
        cache_dir: cache directory.
        **kwargs: extra arguments forwarded to ``SmolVLADataset``.

    Returns:
        DataLoader: the SmolVLA data loader.
    """
    from functools import partial

    # Build the underlying LeRobotDataset.
    lerobot_dataset = LeRobotDataset(
        root=dataset_path,
        repo_id=os.path.basename(dataset_path),
        local_only=local_only,
    )

    # Wrap it for SmolVLA.
    smolvla_dataset = SmolVLADataset(
        dataset=lerobot_dataset,
        config=config,
        use_cache=use_cache,
        cache_dir=cache_dir,
        **kwargs
    )

    chunk_size = config.chunk_size if config is not None else 50
    n_obs_steps = config.n_obs_steps if config is not None else 1
    n_action_steps = config.n_action_steps if config is not None else 50

    # functools.partial (unlike a lambda) is picklable, which is required for
    # num_workers > 0 under the "spawn" start method (default on Windows/macOS).
    collate = partial(
        smolvla_collate_fn,
        chunk_size=chunk_size,
        n_obs_steps=n_obs_steps,
        n_action_steps=n_action_steps,
    )

    return DataLoader(
        smolvla_dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
        collate_fn=collate,
    )
