#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
SmolVLA inference server.

Loads a trained SmolVLA policy and serves action predictions over HTTP.
"""

import os
import torch
import numpy as np
import logging
import argparse
import uvicorn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Dict, List, Optional, Union, Any
from collections import deque
import json

from lerobot.common.datasets.smolvla_dataset import SmolVLADataset, ACTION, OBS_STATE
from lerobot.common.policies.smolvla.configuration_smolvla import SmolVLAConfig
from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy
from lerobot.configs.types import PolicyFeature, FeatureType

# Configure logging for the whole service.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Create the FastAPI application.
app = FastAPI(title="SmolVLA 推理服务", description="用于 SmolVLA 模型的推理服务")

# Module-level state populated by initialize_model() before the server starts.
model = None          # loaded SmolVLAPolicy instance
config = None         # SmolVLAConfig built by create_config()
dataset_stats = None  # normalization statistics used by the policy
device = None         # inference device string, e.g. "cuda" or "cpu"


class InferenceRequest(BaseModel):
    """Request payload for the /inference endpoint.

    Images are nested lists in [C, H, W] layout; qpos fields are flat joint
    position vectors. Absent side cameras are replaced server-side with
    all-zero images (see prepare_batch_from_request).
    """
    head_camera: List[List[List[float]]]  # [C, H, W] image data
    left_camera: Optional[List[List[List[float]]]] = None  # optional; zero image substituted when absent
    right_camera: Optional[List[List[List[float]]]] = None  # optional; zero image substituted when absent
    left_arm_qpos: List[float]  # left arm joint positions
    right_arm_qpos: List[float]  # right arm joint positions
    left_gripper_qpos: List[float]  # left gripper joint positions
    right_gripper_qpos: List[float]  # right gripper joint positions
    task: Optional[str] = "pick the object"  # natural-language task instruction


class InferenceResponse(BaseModel):
    """Response payload for the /inference endpoint."""
    action: List[List[float]]  # predicted actions, [n_action_steps, action_dim]
    success: bool  # True when inference completed without error
    message: str  # human-readable status message


def create_config():
    """Build the SmolVLAConfig used by the inference service.

    Declares three camera inputs, a padded proprioceptive state vector, a
    language instruction input, and a padded action output.

    Returns:
        A fully populated SmolVLAConfig.
    """
    cfg = SmolVLAConfig(
        max_state_dim=32,
        max_action_dim=32,
        chunk_size=50,
        n_obs_steps=1,
        n_action_steps=50,
        resize_imgs_with_padding=(512, 512),
        empty_cameras=2,  # two placeholder (empty) cameras
        prefix_length=400,  # fixed prefix length keeps sequence lengths consistent
    )

    # Make sure the feature dicts exist before populating them.
    if not hasattr(cfg, 'input_features'):
        cfg.input_features = {}
    if not hasattr(cfg, 'output_features'):
        cfg.output_features = {}

    # Register the real camera streams.
    camera_shape = (3, 480, 640)
    for camera_key in ("head_camera", "left_camera", "right_camera"):
        cfg.input_features[camera_key] = PolicyFeature(
            type=FeatureType.VISUAL,
            shape=camera_shape,
        )

    # Proprioceptive state vector, zero-padded up to max_state_dim.
    cfg.input_features["state"] = PolicyFeature(
        type=FeatureType.STATE,
        shape=(cfg.max_state_dim,),
    )

    # Natural-language task instruction.
    cfg.input_features["task"] = PolicyFeature(
        type=FeatureType.INSTRUCTION,
        shape=(cfg.max_instruction_dim,),
    )

    # Robot joint action chunk, padded up to max_action_dim.
    cfg.output_features[ACTION] = PolicyFeature(
        type=FeatureType.ACTION,
        shape=(cfg.max_action_dim,),
    )

    return cfg


def load_dataset_stats(stats_path):
    """Load per-feature normalization statistics from an ``.npz`` archive.

    Keys in the archive are expected to follow ``<feature>_<stat>`` naming,
    e.g. ``action_mean`` or ``left_arm_qpos_std``. The key is split on the
    LAST underscore so feature names that themselves contain underscores
    (``left_arm_qpos``) parse correctly; the previous first-underscore split
    broke such keys (``left_arm_qpos_mean`` became feature ``left``).

    Args:
        stats_path: Path to a numpy ``.npz`` file.

    Returns:
        dict mapping feature name -> {stat name -> torch.Tensor}.
    """
    logging.getLogger(__name__).info(f"加载数据集统计信息: {stats_path}")

    # np.load on an .npz returns an NpzFile; .files lists the archived keys.
    np_stats = np.load(stats_path)

    dataset_stats = {}
    for key in np_stats.files:
        # Split on the last underscore: everything before it is the feature
        # name, the final token is the statistic name ("mean", "std", ...).
        feature_name, sep, stat_name = key.rpartition('_')
        if not sep:
            # Key without an underscore: whole key is the feature name and
            # the stat name is empty (matches the old fallback behavior).
            feature_name, stat_name = key, ''
        dataset_stats.setdefault(feature_name, {})[stat_name] = torch.from_numpy(np_stats[key])

    return dataset_stats


def load_model(config, model_path, dataset_stats, device):
    """Instantiate a SmolVLAPolicy and restore its weights from disk.

    Args:
        config: SmolVLAConfig describing the policy architecture.
        model_path: Path to a ``torch.save``-d state dict.
        dataset_stats: Normalization statistics handed to the policy.
        device: Target inference device, e.g. "cuda" or "cpu".

    Returns:
        The policy moved to *device* and switched to eval mode.
    """
    logger.info(f"加载模型: {model_path}")

    policy = SmolVLAPolicy(config, dataset_stats=dataset_stats)

    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    weights = torch.load(model_path, map_location=device)
    policy.load_state_dict(weights)

    # .to() and .eval() both return the module itself.
    return policy.to(device).eval()


def prepare_batch_from_request(request_data, device):
    """Convert an InferenceRequest into the batch dict the policy consumes.

    All tensors receive a leading batch dimension of 1 and are moved to
    *device*. Missing optional cameras are replaced by all-zero images.
    The four qpos vectors are concatenated in the order (left arm, left
    gripper, right arm, right gripper) and zero-padded up to
    ``config.max_state_dim``.
    """
    batch = {}

    def _to_image_tensor(payload):
        # [C, H, W] nested lists -> [1, C, H, W] float32 tensor on device.
        return torch.tensor(payload, dtype=torch.float32).unsqueeze(0).to(device)

    # Required head camera plus optional side cameras (zero-image fallback).
    batch["head_camera"] = _to_image_tensor(request_data.head_camera)
    for cam_key, cam_payload in (("left_camera", request_data.left_camera),
                                 ("right_camera", request_data.right_camera)):
        if cam_payload:
            batch[cam_key] = _to_image_tensor(cam_payload)
        else:
            batch[cam_key] = torch.zeros((1, 3, 480, 640), dtype=torch.float32, device=device)

    # Raw proprioceptive readings, one flat tensor per field.
    for qpos_key, qpos_values in (
        ("observation.left_arm.qpos", request_data.left_arm_qpos),
        ("observation.right_arm.qpos", request_data.right_arm_qpos),
        ("observation.left_gripper.qpos", request_data.left_gripper_qpos),
        ("observation.right_gripper.qpos", request_data.right_gripper_qpos),
    ):
        batch[qpos_key] = torch.tensor(qpos_values, dtype=torch.float32).to(device)

    # Free-form task instruction string.
    batch["task"] = request_data.task

    # Flat state vector in the order the policy was trained with.
    obs_state = torch.cat([
        batch["observation.left_arm.qpos"],
        batch["observation.left_gripper.qpos"],
        batch["observation.right_arm.qpos"],
        batch["observation.right_gripper.qpos"],
    ], dim=0)

    # Zero-pad up to the configured maximum state dimension.
    missing = config.max_state_dim - obs_state.shape[0]
    if missing > 0:
        obs_state = torch.cat([obs_state, torch.zeros(missing, device=device)], dim=0)

    # Add the batch dimension expected by the policy.
    batch[OBS_STATE] = obs_state.unsqueeze(0)

    return batch


@app.post("/inference", response_model=InferenceResponse)
async def inference(request_data: InferenceRequest):
    """Inference endpoint: map one observation to a chunk of future actions.

    NOTE(review): the forward pass runs synchronously inside this async
    handler, so it blocks the event loop for the duration of inference —
    fine for a single-client setup, confirm before scaling out.
    """
    global model, config, device

    # print(f"------inference:{request_data}")

    if model is None:
        raise HTTPException(status_code=500, detail="模型未初始化")

    try:
        # Build the model input batch from the request payload.
        batch = prepare_batch_from_request(request_data, device)

        # Run the model without gradient tracking.
        with torch.no_grad():
            # Make sure the model is in evaluation mode.
            model.eval()

            # Clear any action queue left over from a previous request.
            model.reset()

            # Preprocess images, state and language for the policy.
            images, img_masks = model.prepare_images(batch)
            state = model.prepare_state(batch)
            lang_tokens, lang_masks = model.prepare_language(batch)

            # Sample an action chunk directly from the underlying model.
            actions = model.model.sample_actions(images, img_masks, lang_tokens, lang_masks, state)

            # Drop padded action dims, then un-normalize to robot units.
            original_action_dim = model.config.output_features[ACTION].shape[0]
            actions = actions[:, :, :original_action_dim]
            actions = model.unnormalize_outputs({ACTION: actions})[ACTION]

            # First batch element -> [n_action_steps, action_dim] nested lists.
            prediction = actions[0].cpu().numpy().tolist()

        return InferenceResponse(
            action=prediction,
            success=True,
            message="推理成功"
        )

    except Exception as e:
        logger.error(f"推理过程中出错: {str(e)}")
        raise HTTPException(status_code=500, detail=f"推理失败: {str(e)}")


@app.get("/health")
async def health_check():
    """Liveness probe: reports whether the model has been loaded."""
    if model is not None:
        return {"status": "ok", "message": "服务正常运行"}
    return {"status": "error", "message": "模型未初始化"}


def parse_args():
    """Parse the command-line options of the inference service.

    Returns:
        argparse.Namespace with model_path, stats_path, device, host, port.
    """
    parser = argparse.ArgumentParser(description="SmolVLA 推理服务")
    # Default to GPU inference whenever CUDA is available.
    default_device = "cuda" if torch.cuda.is_available() else "cpu"
    option_table = (
        ("--model_path", dict(type=str, default="/home/dm/ydw/lerobot/output/model_step_16000.pt",
                              help="模型权重路径")),
        ("--stats_path", dict(type=str, default="/home/dm/ydw/lerobot/output/dataset_stats.npz",
                              help="数据集统计信息路径")),
        ("--device", dict(type=str, default=default_device,
                          help="推理设备")),
        ("--host", dict(type=str, default="0.0.0.0",
                        help="服务主机地址")),
        ("--port", dict(type=int, default=8000,
                        help="服务端口")),
    )
    for flag, kwargs in option_table:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()


def initialize_model(args):
    """Populate the module-level model/config/stats/device from CLI args.

    Falls back to identity normalization (zero mean, unit std) for the
    action feature when the stats file is missing.

    Returns:
        True on success; False when the weights file does not exist or
        loading the model raises.
    """
    global model, config, dataset_stats, device

    device = args.device
    logger.info(f"使用设备: {device}")

    config = create_config()
    logger.info("模型配置已创建")

    # Without a weights file there is nothing to serve.
    if not os.path.exists(args.model_path):
        logger.error(f"模型文件不存在: {args.model_path}")
        return False

    if os.path.exists(args.stats_path):
        dataset_stats = load_dataset_stats(args.stats_path)
    else:
        logger.warning(f"数据集统计信息文件不存在: {args.stats_path}")
        logger.warning("将尝试创建默认的统计信息")
        # Default stats: zero mean / unit std for the action feature only.
        action_dim = config.output_features[ACTION].shape[0]
        dataset_stats = {
            ACTION: {
                "mean": torch.zeros(action_dim, device=device),
                "std": torch.ones(action_dim, device=device),
            }
        }

    try:
        model = load_model(config, args.model_path, dataset_stats, device)
    except Exception as e:
        logger.error(f"加载模型时出错: {str(e)}")
        return False
    logger.info("模型加载成功")
    return True


def main():
    """Entry point: parse CLI options, load the model, start the server."""
    args = parse_args()

    # Only bring up the HTTP server when the model loaded successfully.
    if initialize_model(args):
        logger.info(f"启动服务，监听 {args.host}:{args.port}")
        uvicorn.run(app, host=args.host, port=args.port)
    else:
        logger.error("初始化模型失败，服务无法启动")


# Start the service only when executed as a script (not on import).
if __name__ == "__main__":
    main()
