import random
import base64
import numpy as np
import torch
from fastapi import APIRouter, HTTPException, Query
from fastapi.responses import JSONResponse
from typing import Optional
import io
from PIL import Image
import h5py
import os

# 导入你的数据集和模型
from hand.recognition.datasets.gesture import HandPoseDataset
from hand.track.umetrack.model_v3.dyd_model import DYDModelV2
from hand.track.umetrack.model_v3.model_opts import ModelOpts

# Create the classification router; every endpoint below is mounted under /classify.
router = APIRouter(prefix="/classify", tags=["classification"])

# Module-level state - initialized exactly once at import time (see bottom of file).
dataset = None  # HandPoseDataset for the "training" split (None if init failed)
class_distribution = None  # per-class sample counts, converted to JSON-safe types
num_classes = None  # number of gesture classes, derived from class_distribution
model = None  # DYDModelV2 instance in eval mode (None if init failed)
device = None  # torch.device: "cuda" when available, else "cpu"
dataset_root = r"files\hand\datasets"  # Windows-style relative path to the HDF5 dataset files


def convert_numpy_types(obj):
    """Recursively convert numpy scalars/arrays into native Python types.

    Makes arbitrarily nested structures (dict / list / tuple / ndarray)
    safe for JSON serialization. Objects that are not numpy types are
    returned unchanged.

    Args:
        obj: any Python object, possibly containing numpy values.

    Returns:
        The same structure with numpy integers -> int, floats -> float,
        bools -> bool, and ndarrays -> (nested) lists.
    """
    if isinstance(obj, dict):
        return {convert_numpy_types(k): convert_numpy_types(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        return [convert_numpy_types(item) for item in obj]
    elif isinstance(obj, tuple):
        # Bug fix: tuples were previously passed through untouched, so
        # numpy values nested inside them broke JSON serialization.
        return tuple(convert_numpy_types(item) for item in obj)
    elif isinstance(obj, np.integer):
        # np.integer is the abstract base of all sized ints (int8..uint64),
        # so the explicit uint8/uint16/... listing was redundant.
        return int(obj)
    elif isinstance(obj, np.floating):
        # np.floating covers float16/float32/float64.
        return float(obj)
    elif isinstance(obj, np.ndarray):
        # tolist() already yields native scalars, but recurse defensively
        # in case of object-dtype arrays.
        return convert_numpy_types(obj.tolist())
    elif isinstance(obj, np.bool_):
        return bool(obj)
    else:
        return obj


def load_model():
    """Build the DYDModelV2 classifier, load trained weights, move to device.

    Reads the module-level `num_classes` and updates the `model` and
    `device` globals as a side effect.

    Returns:
        True when the model was set up successfully, False otherwise.
    """
    global model, device, num_classes

    try:
        # Prefer GPU when one is available.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"Using device: {device}")

        # Configure and instantiate the model.
        opts = ModelOpts()
        opts.numClasses = num_classes
        model = DYDModelV2(
            input_size=(96, 96),
            model_opts=opts
        )

        model_path = r"files\hand\models\classify\classification_final_model.pth"
        if os.path.exists(model_path):
            # map_location lets CUDA-trained checkpoints load on CPU hosts.
            state_dict = torch.load(model_path, map_location=device)
            model.load_state_dict(state_dict)
            print(f"模型权重加载成功: {model_path}")
        else:
            print(f"警告: 未找到模型权重文件 {model_path}，使用随机初始化模型")

        model = model.to(device)
        model.eval()  # inference mode: freezes dropout / batch-norm behavior

        # Log parameter counts for a quick sanity check.
        total_params = sum(p.numel() for p in model.parameters())
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        print(f"模型参数 - 总计: {total_params:,}, 可训练: {trainable_params:,}")

        return True
    except Exception as e:
        print(f"模型加载失败: {e}")
        return False


def tensor_to_base64(tensor):
    """Encode a multi-view grayscale image tensor as base64 PNG strings.

    Expects a 4-D array shaped [NUM_VIEWS, 1, H, W] and returns one
    base64 string per view. Any other shape, or any conversion failure,
    yields an empty list.
    """
    try:
        # Work on a CPU numpy copy regardless of the input container.
        array = tensor.cpu().numpy() if torch.is_tensor(tensor) else tensor

        # Only the multi-view layout [NUM_VIEWS, 1, H, W] is supported.
        if len(array.shape) != 4:
            return []

        encoded_views = []
        for view in range(array.shape[0]):
            plane = array[view, 0]  # single grayscale [H, W] plane

            # Scale float images in [0, 1] to byte range; otherwise
            # assume the values are already 0-255.
            if plane.max() <= 1.0:
                plane = (plane * 255).astype(np.uint8)
            else:
                plane = plane.astype(np.uint8)

            # Render the plane as a PNG and base64-encode the bytes.
            buf = io.BytesIO()
            Image.fromarray(plane, mode='L').save(buf, format="PNG")
            encoded_views.append(base64.b64encode(buf.getvalue()).decode('utf-8'))

        return encoded_views

    except Exception as e:
        print(f"图像转换失败: {e}")
        return []


def get_sample_by_id(sample_id: int, split: str = "training"):
    """Fetch one dataset sample by index and package it for the API.

    Loads the requested split fresh on each call, reads the sample, and
    returns a JSON-safe dict with base64-encoded images and native-typed
    label/mask fields.

    Args:
        sample_id: zero-based index into the split.
        split: "training" or "testing".

    Raises:
        HTTPException: 404 when sample_id is out of range for the split,
            500 on any other failure.
    """
    try:
        # Reload the dataset for the requested split.
        current_dataset = HandPoseDataset(
            root_dir=dataset_root,
            split=split,
            num_views=4
        )

        if sample_id < 0 or sample_id >= len(current_dataset):
            raise HTTPException(status_code=404, detail=f"样本ID {sample_id} 在{split}数据集中不存在")

        sample = current_dataset[sample_id]
        # Encode the multi-view images as base64 PNGs.
        images_base64 = tensor_to_base64(sample['images'])

        # Convert tensor/numpy fields to JSON-safe native types.
        return {
            "sample_id": sample_id,
            "images": images_base64,
            "label": convert_numpy_types(sample['label'].item()),
            "num_views": len(images_base64),
            "view_mask": convert_numpy_types(
                sample['view_mask'].tolist() if torch.is_tensor(sample['view_mask']) else sample['view_mask']),
            "hand_idx": convert_numpy_types(
                sample['hand_idx'].item() if torch.is_tensor(sample['hand_idx']) else sample['hand_idx']),
            "split": split
        }
    except HTTPException:
        # Bug fix: previously the generic handler below swallowed the 404
        # raised above and re-reported it as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取样本失败: {str(e)}") from e


def get_random_sample(split: str = "training"):
    """Return one uniformly random sample from the given split.

    Raises:
        HTTPException: 404 when the split is empty, 500 on other failures.
    """
    try:
        # Reload the dataset for the requested split.
        current_dataset = HandPoseDataset(
            root_dir=dataset_root,
            split=split,
            num_views=4
        )

        # Bug fix: random.randint(0, -1) raised ValueError (reported as a
        # generic 500) on an empty split; report a clean 404 instead.
        if len(current_dataset) == 0:
            raise HTTPException(status_code=404, detail=f"{split}数据集中没有样本")

        random_sample_id = random.randint(0, len(current_dataset) - 1)
        return get_sample_by_id(random_sample_id, split)
    except HTTPException:
        # Bug fix: don't re-wrap HTTP errors (e.g. a 404 from
        # get_sample_by_id) into a generic 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取随机样本失败: {str(e)}") from e


@router.get("/acc")
async def test_accuracy(
        num: Optional[int] = Query(None, description="测试样本数量，为空或非正整数则使用全部数据"),
        split: str = Query("training", description="数据集分割: training 或 testing")
):
    """
    Evaluate the classifier's accuracy on a (shuffled) subset of a split.

    Parameters:
    - num: number of samples to evaluate; None or non-positive uses all data
    - split: dataset split, "training" or "testing"

    Returns:
    - overall top-1 / top-5 accuracy
    - per-class top-1 / top-5 accuracy with sample counts
    """
    global model, device

    if model is None:
        raise HTTPException(status_code=500, detail="模型未初始化")

    if split not in ["training", "testing"]:
        raise HTTPException(status_code=400, detail="split参数必须是'training'或'testing'")

    try:
        # Load the requested split fresh.
        test_dataset = HandPoseDataset(
            root_dir=dataset_root,
            split=split,
            num_views=4
        )

        # Clamp the requested sample count to the dataset size.
        if num is None or num <= 0:
            num = len(test_dataset)
        else:
            num = min(num, len(test_dataset))

        print(f"开始测试，使用 {num} 个样本，数据集: {split}")

        # shuffle=True so a partial run evaluates a random subset.
        test_loader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=32,
            shuffle=True,
            num_workers=2
        )

        # Overall counters.
        total_correct_top1 = 0
        total_correct_top5 = 0
        total_samples = 0

        # Per-class counters.
        class_correct_top1 = {i: 0 for i in range(num_classes)}
        class_correct_top5 = {i: 0 for i in range(num_classes)}
        class_total = {i: 0 for i in range(num_classes)}

        with torch.no_grad():
            for batch in test_loader:
                # Stop once enough samples were seen (whole batches only,
                # so total_samples may slightly exceed num, as before).
                if total_samples >= num:
                    break

                images = batch['images'].to(device)
                labels = batch['label'].to(device)
                view_mask = batch['view_mask'].to(device)
                total_xfs_ = batch['total_xfs_'].to(device)
                world_to_ref_camera = batch['world_to_ref_camera'].to(device)

                class_logits = model.classify(
                    images,
                    view_mask,
                    total_xfs_,
                    world_to_ref_camera
                )

                # Top-1 and top-5 predictions for the batch.
                _, pred_top1 = torch.max(class_logits, 1)
                _, pred_top5 = torch.topk(class_logits, 5, dim=1)

                batch_size = labels.size(0)
                total_samples += batch_size

                total_correct_top1 += (pred_top1 == labels).sum().item()

                # Fold the per-sample top-5 check into the per-class loop
                # (the original computed it twice with identical results).
                for i in range(batch_size):
                    true_label = labels[i].item()
                    class_total[true_label] += 1

                    if pred_top1[i].item() == true_label:
                        class_correct_top1[true_label] += 1

                    if true_label in pred_top5[i]:
                        total_correct_top5 += 1
                        class_correct_top5[true_label] += 1

        # Bug fix: an empty split previously caused a ZeroDivisionError
        # that surfaced as a generic 500.
        if total_samples == 0:
            raise HTTPException(status_code=404, detail=f"{split}数据集中没有样本")

        overall_top1 = total_correct_top1 / total_samples
        overall_top5 = total_correct_top5 / total_samples

        # Per-class accuracy; classes with no samples report 0.0.
        class_accuracy = {}
        for class_id in range(num_classes):
            seen = class_total[class_id]
            class_accuracy[class_id] = {
                "top1": class_correct_top1[class_id] / seen if seen > 0 else 0.0,
                "top5": class_correct_top5[class_id] / seen if seen > 0 else 0.0,
                "samples": seen
            }

        return JSONResponse(content={
            "status": "success",
            "test_config": {
                "split": split,
                "num_samples": total_samples,
                "num_classes": num_classes
            },
            "overall_accuracy": {
                "top1": overall_top1,
                "top5": overall_top5
            },
            "class_accuracy": convert_numpy_types(class_accuracy)
        })

    except HTTPException:
        # Let deliberate HTTP errors (e.g. the empty-split 404) through.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"测试失败: {str(e)}") from e


@router.get("/infer")
async def inference_random_sample(
        split: str = Query("training", description="数据集分割: training 或 testing")
):
    """
    Run inference on one randomly chosen sample of the given split.

    Parameters:
    - split: dataset split, "training" or "testing"

    Returns:
    - ground-truth label
    - predicted label with confidence and top-5 probabilities
    - the sample's multi-view base64 image data
    """
    global model, device

    if model is None:
        raise HTTPException(status_code=500, detail="模型未初始化")

    if split not in ["training", "testing"]:
        raise HTTPException(status_code=400, detail="split参数必须是'training'或'testing'")

    try:
        # Bug fix: the split parameter used to be validated but ignored -
        # the global training dataset was always sampled. Load the
        # requested split instead.
        current_dataset = HandPoseDataset(
            root_dir=dataset_root,
            split=split,
            num_views=4
        )
        if len(current_dataset) == 0:
            raise HTTPException(status_code=404, detail=f"{split}数据集中没有样本")

        random_sample_id = random.randint(0, len(current_dataset) - 1)
        sample_data = current_dataset[random_sample_id]

        # Prepare the inference inputs.
        images = sample_data["images"]
        images_base64 = tensor_to_base64(images)
        view_mask = sample_data['view_mask']
        total_xfs_ = sample_data['total_xfs_']
        world_to_ref_camera = sample_data["world_to_ref_camera"]

        # Move everything to the model's device.
        images = images.to(device)
        view_mask = view_mask.to(device)
        total_xfs_ = total_xfs_.to(device)
        world_to_ref_camera = world_to_ref_camera.to(device)

        with torch.no_grad():
            # NOTE(review): only images/view_mask get a batch dimension
            # here; total_xfs_/world_to_ref_camera are passed as-is,
            # mirroring the original call - confirm against the contract
            # of model.classify.
            class_logits = model.classify(
                images.unsqueeze(0),
                view_mask.unsqueeze(0),
                total_xfs_,
                world_to_ref_camera
            )
            probabilities = torch.softmax(class_logits, dim=1)
            top5_probs, top5_indices = torch.topk(probabilities, 5, dim=1)
            predicted_label = torch.argmax(class_logits, dim=1).item()
            confidence = probabilities[0, predicted_label].item()

        # Package the top-5 predictions as JSON-safe dicts.
        top5_predictions = [
            {
                "class_id": int(top5_indices[0, i].item()),
                "probability": float(top5_probs[0, i].item())
            }
            for i in range(5)
        ]

        return JSONResponse(content={
            "status": "success",
            "split": split,
            "sample_id": random_sample_id,
            "ground_truth": convert_numpy_types(sample_data['label'].item()),
            "prediction": {
                "predicted_label": predicted_label,
                "confidence": confidence,
                "top5_predictions": top5_predictions
            },
            "images": images_base64,  # multi-view base64 images
            "view_mask": sample_data["view_mask"].tolist()
        })

    except HTTPException:
        # Let deliberate HTTP errors (e.g. the empty-split 404) through.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"推理失败: {str(e)}") from e


@router.get("/class-num")
async def get_class_number():
    """Return the number of gesture classes and the id -> name mapping."""
    if num_classes is None:
        raise HTTPException(status_code=500, detail="数据集未初始化")

    # Gesture names indexed by class id (0..19).
    gesture_names = [
        "单指捏合",
        "抓取",
        "单指指向/数字一",
        "比耶/数字二",
        "数字三",
        "数字三（2）",
        "数字四",
        "手掌/数字五",
        "打call/数字六",
        "数字七",
        "枪/数字八",
        "单指打勾/数字九",
        "握拳/数字零",
        "竖拇指",
        "摇滚手势",
        "OK手势",
        "单手比心",
        "双指指向",
        "竖小指",
        "其他",
    ]
    gesture_dict = dict(enumerate(gesture_names))

    return JSONResponse(content={
        "status": "success",
        "num_classes": num_classes,
        "gestures": gesture_dict
    })



@router.get("/image")
async def get_image_data(
        sample_id: Optional[int] = Query(None, description="样本ID"),
        label: Optional[int] = Query(None, description="类别标签"),
        split: str = Query("training", description="数据集分割: training 或 testing")
):
    """
    Fetch image data for one sample.

    Parameters:
    - sample_id: sample index (optional; takes priority over label)
    - label: class label to sample from (optional)
    - split: dataset split, "training" or "testing"

    Selection priority: sample_id > label > random.
    """

    try:
        if sample_id is not None:
            # Case 1: explicit sample id.
            result = get_sample_by_id(sample_id, split)
            result["search_type"] = "by_id"

        elif label is not None:
            # Case 2: only a label - load the requested split and pick a
            # random sample carrying that label.
            current_dataset = HandPoseDataset(
                root_dir=dataset_root,
                split=split,
                num_views=4
            )

            # Collect the global indices of every sample with this label.
            label_samples = []
            # Bug fix: use enumerate instead of file_paths.index(file_path),
            # which was an O(n^2) scan and returned the wrong index for
            # duplicated file paths.
            for file_idx, file_path in enumerate(current_dataset.file_paths):
                try:
                    with h5py.File(file_path, 'r') as h5f:
                        if 'label' in h5f:
                            labels = h5f['label'][:]
                            # Local indices of the matching samples.
                            indices = np.where(labels == label)[0]
                            # Shift into global dataset indices.
                            start_idx, _ = current_dataset.file_indices[file_idx]
                            label_samples.extend(start_idx + idx for idx in indices)
                except Exception as e:
                    # Best-effort: skip unreadable files but keep scanning.
                    print(f"读取文件 {file_path} 失败: {e}")
                    continue

            if not label_samples:
                raise HTTPException(status_code=404, detail=f"未找到标签 {label} 在{split}数据集中的样本")

            random_sample_id = random.choice(label_samples)
            result = get_sample_by_id(int(random_sample_id), split)
            result["search_type"] = "by_label"

        else:
            # Case 3: neither parameter - fully random sample.
            result = get_random_sample(split)
            result["search_type"] = "random"

        result["status"] = "success"
        return JSONResponse(content=result)

    except HTTPException:
        # Re-raise deliberate HTTP errors unchanged.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"处理请求失败: {str(e)}") from e


@router.get("/dataset-info")
async def get_dataset_info():
    """Summarize the globally-loaded (training) dataset."""
    if dataset is None:
        raise HTTPException(status_code=500, detail="数据集未初始化")

    try:
        # Fetch stats and make them JSON-safe in one step.
        stats = convert_numpy_types(dataset.get_dataset_stats())

        info = {
            "total_samples": len(dataset),
            "num_classes": num_classes,
            "class_distribution": class_distribution,
            "split": stats.get('split', 'training'),
            "data_dir": stats.get('data_dir', ''),
            "num_files": len(dataset.file_paths)
        }
        return JSONResponse(content={
            "status": "success",
            "dataset_info": info
        })
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取数据集信息失败: {str(e)}")


@router.get("/health")
async def health_check():
    """Liveness probe: report initialization state of dataset and model."""
    dataset_ready = dataset is not None
    return JSONResponse(content={
        "status": "success",
        "dataset_initialized": dataset_ready,
        "model_initialized": model is not None,
        "num_classes": num_classes if dataset_ready else 0
    })


# Initialize the dataset and model once, at module import time.
try:
    print("正在初始化数据集和模型...")
    data_root = dataset_root
    # Load the training split as the module's default dataset.
    dataset = HandPoseDataset(
        root_dir=data_root,
        split="training",
        num_views=4
    )
    class_distribution = dataset.get_class_distribution()
    # Convert numpy types so the distribution is JSON-serializable.
    class_distribution = convert_numpy_types(class_distribution)
    num_classes = len(class_distribution)
    print(f"数据集初始化成功，共 {num_classes} 个类别")
    print(f"类别分布: {class_distribution}")

    # Build the classifier and load its trained weights.
    if load_model():
        print("模型初始化成功")
    else:
        print("模型初始化失败")

except Exception as e:
    # Fall back to None so endpoints report clean "not initialized" errors.
    # NOTE(review): class_distribution/num_classes are not reset here; they
    # keep whatever value was assigned before the failure - confirm intended.
    print(f"初始化失败: {e}")
    dataset = None
    model = None