#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
SmolVLA 模型验证脚本
用于读取一个数据样例进行推理并可视化误差
"""

import os
import torch
import numpy as np
import matplotlib.pyplot as plt
import logging
from tqdm import tqdm
import argparse
from collections import deque
import json

from lerobot.common.datasets.smolvla_dataset import SmolVLADataset, create_smolvla_dataloader, ACTION, OBS_STATE
from lerobot.common.policies.smolvla.configuration_smolvla import SmolVLAConfig
from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy
from lerobot.configs.types import PolicyFeature, FeatureType

# Configure logging level and format.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def parse_args():
    """Parse command-line arguments for the validation script.

    Returns:
        argparse.Namespace with dataset_path, model_path, sample_idx,
        device, output_dir, and use_empty_cameras.
    """
    parser = argparse.ArgumentParser(description="验证 SmolVLA 模型")
    parser.add_argument("--dataset_path", type=str, default="/home/dm/ydw/datasets/task_data_pick_v1.0",
                        help="数据集路径")
    parser.add_argument("--model_path", type=str, default="/home/dm/ydw/lerobot/output/model_step_16000.pt",
                        help="模型权重路径")
    parser.add_argument("--sample_idx", type=int, default=0,
                        help="要验证的样本索引")
    # Prefer CUDA when available; fall back to CPU otherwise.
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
                        help="推理设备")
    parser.add_argument("--output_dir", type=str, default="./validation_output",
                        help="输出目录")
    # Fixed help text: it previously claimed the flag was enabled by default,
    # contradicting default=False for a store_true flag.
    parser.add_argument("--use_empty_cameras", action="store_true", default=False,
                        help="是否使用空相机（默认关闭）")
    return parser.parse_args()


def create_config():
    """Build the SmolVLAConfig used for validation.

    Populates the config's input features (three camera streams, the
    proprioceptive state, and the language instruction) and the action
    output feature, then returns it.
    """
    cfg = SmolVLAConfig(
        max_state_dim=32,
        max_action_dim=32,
        chunk_size=50,
        n_obs_steps=1,
        n_action_steps=50,
        resize_imgs_with_padding=(512, 512),
        empty_cameras=2,  # 2 means empty cameras are used
        prefix_length=400,  # fixed prefix length keeps sequence lengths consistent
    )

    # Make sure the feature dicts exist before populating them.
    if not hasattr(cfg, 'input_features'):
        cfg.input_features = {}

    # Real camera image streams.
    for cam_key in ("head_camera", "left_camera", "right_camera"):
        cfg.input_features[cam_key] = PolicyFeature(type=FeatureType.VISUAL, shape=(3, 480, 640))

    # Proprioceptive state vector.
    cfg.input_features["state"] = PolicyFeature(type=FeatureType.STATE, shape=(cfg.max_state_dim,))

    # Language instruction tokens.
    cfg.input_features["task"] = PolicyFeature(type=FeatureType.INSTRUCTION, shape=(cfg.max_instruction_dim,))

    if not hasattr(cfg, 'output_features'):
        cfg.output_features = {}

    # Robot joint-control action output.
    cfg.output_features[ACTION] = PolicyFeature(type=FeatureType.ACTION, shape=(cfg.max_action_dim,))

    return cfg


def load_dataset_stats(dataloader, config=None):
    """Compute normalization statistics for the ACTION feature.

    Args:
        dataloader: Iterable yielding batch dicts; batches containing the
            ACTION key contribute to the statistics.
        config: Optional policy config. Only consulted as a fallback to
            derive the action dimension when no ACTION data is found.

    Returns:
        dict mapping feature key to {"mean": Tensor, "std": Tensor}.

    Raises:
        ValueError: If the dataloader yields no ACTION data and no config
            is supplied to derive a default action dimension.
    """
    logger.info("计算数据集统计信息...")
    dataset_stats = {}

    # Collect every ACTION batch so mean/std cover the whole dataset.
    actions = []
    for batch in tqdm(dataloader, desc="收集 action 数据"):
        if ACTION in batch:
            actions.append(batch[ACTION])

    if actions:
        actions_tensor = torch.cat(actions, dim=0)
        dataset_stats[ACTION] = {
            "mean": actions_tensor.mean(dim=0),
            # Small epsilon avoids division by zero during normalization.
            "std": actions_tensor.std(dim=0) + 1e-8,
        }
        logger.info(f"ACTION mean shape: {dataset_stats[ACTION]['mean'].shape}")
        logger.info(f"ACTION std shape: {dataset_stats[ACTION]['std'].shape}")
    else:
        # Fall back to identity statistics. The original code referenced an
        # undefined `config` name here (NameError at runtime); it is now an
        # explicit optional parameter.
        if config is None:
            raise ValueError(
                "No ACTION data found in dataloader and no config provided "
                "to derive a default action dimension."
            )
        action_dim = config.output_features[ACTION].shape[0]
        dataset_stats[ACTION] = {
            "mean": torch.zeros(action_dim),
            "std": torch.ones(action_dim),
        }
        logger.info(f"Using default ACTION stats with dim {action_dim}")

    return dataset_stats


def save_dataset_stats(dataset_stats, output_dir):
    """Persist dataset statistics: full tensors as .npz plus a JSON summary.

    Args:
        dataset_stats: Mapping of feature key -> {stat name -> Tensor}.
        output_dir: Directory to write both files into (created if missing).

    Returns:
        Tuple (npz_path, json_path) of the written file paths.
    """
    logger.info("保存数据集统计信息到文件...")

    # Create the destination directory if it does not exist yet.
    os.makedirs(output_dir, exist_ok=True)

    # Full tensors go into one .npz archive, keyed "<feature>_<stat>".
    np_stats_path = os.path.join(output_dir, "dataset_stats.npz")
    np_stats = {
        f"{key}_{stat_name}": stat_value.cpu().numpy()
        for key, value in dataset_stats.items()
        for stat_name, stat_value in value.items()
    }
    np.savez(np_stats_path, **np_stats)
    logger.info(f"数据集统计信息已保存为 numpy 文件: {np_stats_path}")

    # JSON carries only shapes and summary scalars, since tensors are not
    # directly JSON-serializable.
    json_stats_path = os.path.join(output_dir, "dataset_stats_info.json")
    json_stats = {
        key: {
            stat_name: {
                "shape": list(stat_value.shape),
                "min": float(stat_value.min().item()),
                "max": float(stat_value.max().item()),
                "mean": float(stat_value.mean().item()),
            }
            for stat_name, stat_value in value.items()
        }
        for key, value in dataset_stats.items()
    }
    with open(json_stats_path, "w") as f:
        json.dump(json_stats, f, indent=4)
    logger.info(f"数据集统计信息摘要已保存为 JSON 文件: {json_stats_path}")

    return np_stats_path, json_stats_path


def load_model(config, model_path, dataset_stats, device):
    """Instantiate a SmolVLAPolicy, load its weights, and set eval mode.

    Args:
        config: SmolVLAConfig for the policy.
        model_path: Path to the saved state dict (.pt file).
        dataset_stats: Normalization statistics passed to the policy.
        device: Device the model is moved to.

    Returns:
        The policy on `device`, in eval mode.
    """
    logger.info(f"加载模型: {model_path}")

    policy = SmolVLAPolicy(config, dataset_stats=dataset_stats)

    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # trusted checkpoints here.
    weights = torch.load(model_path, map_location=device)
    policy.load_state_dict(weights)
    policy.to(device)
    policy.eval()

    return policy


def visualize_error(ground_truth, prediction, output_path):
    """Plot ground truth vs. prediction and the signed error, save as PNG.

    Args:
        ground_truth: Array-like of true action values.
        prediction: Array-like of predicted action values (same shape).
        output_path: File path for the saved figure.

    Returns:
        Tuple (mse, mae) of the prediction error.
    """
    # Ensure the output directory exists. Guard against an empty dirname
    # (e.g. output_path="plot.png"), where os.makedirs("") would raise.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    # Error metrics.
    error = prediction - ground_truth
    mse = np.mean(error ** 2)
    mae = np.mean(np.abs(error))

    plt.figure(figsize=(12, 8))

    # Top panel: ground truth vs. prediction.
    plt.subplot(2, 1, 1)
    plt.plot(ground_truth, 'b-', label='true')
    plt.plot(prediction, 'r-', label='pred')
    plt.title(f'true vs pred (MSE: {mse:.4f}, MAE: {mae:.4f})')
    plt.legend()
    plt.grid(True)

    # Bottom panel: signed error.
    plt.subplot(2, 1, 2)
    plt.plot(error, 'g-')
    plt.title('error (pred - true)')
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(output_path)
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close()
    logger.info(f"error visualization saved to: {output_path}")

    # Log summary statistics.
    logger.info(f"均方误差 (MSE): {mse:.4f}")
    logger.info(f"平均绝对误差 (MAE): {mae:.4f}")

    return mse, mae


def validate_sample(model, dataloader, sample_idx, device, output_dir):
    """Run inference on one dataset sample and visualize prediction error.

    Args:
        model: Loaded SmolVLAPolicy in eval mode.
        dataloader: DataLoader whose underlying dataset is indexed directly.
        sample_idx: Index of the sample to validate.
        device: Device to run inference on.
        output_dir: Directory where the error plot is written.

    Returns:
        dict with sample_idx, ground_truth, prediction, mse, and mae.
    """
    # Create the output directory if needed.
    os.makedirs(output_dir, exist_ok=True)

    # Pull a single sample straight from the dataset, bypassing batching.
    dataset = dataloader.dataset
    sample = dataset[sample_idx]

    # Convert the sample to batch format: tensors get a leading batch dim
    # and move to the device; non-tensor values pass through unchanged.
    # Debug prints replaced with logger.debug for consistency with the
    # file's logging convention.
    batch = {}
    for key, value in sample.items():
        if isinstance(value, torch.Tensor):
            logger.debug(f"batch key: {key}, type: {type(value)}, shape: {value.shape}")
            batch[key] = value.unsqueeze(0).to(device)
        else:
            logger.debug(f"batch key: {key}, type: {type(value)}")
            batch[key] = value

    # Ground-truth action with the batch dim removed.
    ground_truth = batch[ACTION].cpu().numpy()[0]

    # Model inference.
    with torch.no_grad():
        # Make sure the model is in eval mode.
        model.eval()

        # Clear any queued actions from a previous rollout.
        model.reset()

        # Prepare model inputs on the target device.
        images, img_masks = model.prepare_images(batch)
        state = model.prepare_state(batch)
        lang_tokens, lang_masks = model.prepare_language(batch)

        # Sample an action chunk, e.g. (1, 50, 32).
        actions = model.model.sample_actions(images, img_masks, lang_tokens, lang_masks, state)

        # Trim padded action dims, then un-normalize to the original scale.
        original_action_dim = model.config.output_features[ACTION].shape[0]
        actions = actions[:, :, :original_action_dim]
        actions = model.unnormalize_outputs({ACTION: actions})[ACTION]

        # Prediction with the batch dim removed.
        prediction = actions[0].cpu().numpy()

    # Visualize and quantify the prediction error.
    output_path = os.path.join(output_dir, f"sample_{sample_idx}_error.png")
    mse, mae = visualize_error(ground_truth, prediction, output_path)

    return {
        "sample_idx": sample_idx,
        "ground_truth": ground_truth,
        "prediction": prediction,
        "mse": mse,
        "mae": mae,
    }


def main():
    """Entry point: load data and model, validate one sample, save outputs."""
    # Parse CLI arguments.
    args = parse_args()

    # Create the output directory.
    os.makedirs(args.output_dir, exist_ok=True)

    # Build the model configuration.
    config = create_config()

    # Build a single-sample dataloader over the local dataset.
    logger.info(f"加载数据集: {args.dataset_path}")
    dataloader = create_smolvla_dataloader(
        dataset_path=args.dataset_path,
        config=config,
        batch_size=1,  # validate one sample at a time
        shuffle=False,
        num_workers=0,
        pin_memory=True,
        local_only=True,  # local dataset only
        use_cache=True,   # use cache to speed up loading
    )
    logger.info("------数据加载器创建完成")

    # Compute and persist normalization statistics.
    dataset_stats = load_dataset_stats(dataloader)
    np_stats_path, json_stats_path = save_dataset_stats(dataset_stats, args.output_dir)
    logger.info(f"数据集统计信息已保存到: {args.output_dir} {json_stats_path}")

    # Load the trained policy.
    model = load_model(config, args.model_path, dataset_stats, args.device)

    # Validate a single sample and surface the headline metrics
    # (the result dict was previously computed but never used).
    result = validate_sample(model, dataloader, args.sample_idx, args.device, args.output_dir)
    logger.info(f"MSE: {result['mse']:.4f}, MAE: {result['mae']:.4f}")

    logger.info(f"验证完成，结果已保存到: {args.output_dir}")


if __name__ == "__main__":
    main()
