"""
HunyuanWorld-Mirror NPU REST API
基于腾讯混元 HunyuanWorld-Mirror 3D重建模型
参考: https://github.com/Tencent-Hunyuan/HunyuanWorld-Mirror/blob/main/app.py

支持: 点云重建、深度估计、法线估计、相机姿态估计、高斯溅射
"""

import os
import sys
import time
import json
import uuid
import io
import base64
import shutil
import threading
import gc
from pathlib import Path
from typing import List, Optional, Dict, Any

import cv2
import numpy as np
from PIL import Image

import torch
import uvicorn
from fastapi import FastAPI, HTTPException, UploadFile, File, Form
from fastapi.responses import JSONResponse, FileResponse
from pydantic import BaseModel

# ========== NPU device configuration ==========
NPU_DEVICE_ID = 0
NPU_NAME = f"npu:{NPU_DEVICE_ID}"
_npu_initialized = False  # process-wide flag: has the NPU been set up yet?

# ========== Patch threading.Thread so spawned threads get the NPU context ==========
_OriginalThread = threading.Thread

class NPUContextThread(_OriginalThread):
    """Thread subclass that binds the NPU device before the target runs.

    Huawei Ascend NPUs must not be initialized twice, so the device is only
    set while the process-wide flag is still unset; a "Repeated
    initialization" error (code 100002) is tolerated silently.
    """
    def run(self):
        global _npu_initialized
        if not _npu_initialized:
            try:
                import torch_npu  # noqa: F401 -- registers torch.npu
                torch.npu.set_device(NPU_NAME)
            except Exception as e:
                message = str(e)
                already_initialized = (
                    "Repeated initialization" in message or "100002" in message
                )
                if not already_initialized:
                    print(f"[NPUContextThread] Warning: {e}")
        super().run()

# Replace the stock Thread class so third-party code picks it up as well.
threading.Thread = NPUContextThread

# ========== FastAPI application ==========
app = FastAPI(
    title="HunyuanWorld-Mirror NPU API",
    description="腾讯混元 3D 重建模型 API - 支持点云重建、深度估计、法线估计、高斯溅射",
    version="1.0.0"
)

# ========== Global model state (populated by load_model() at startup) ==========
model = None           # WorldMirror network, or None when loading failed
device = None          # torch.device actually in use (npu/cuda/cpu)
skyseg_session = None  # optional onnxruntime session for sky segmentation

# ========== Output directory for per-task artifacts ==========
OUTPUT_DIR = Path("/tmp/hunyuan_output")
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)


# -----------------------------------------------------------
# NPU 上下文管理
# -----------------------------------------------------------
def init_npu_once():
    """Initialize the NPU device exactly once per process.

    Returns:
        True when the device is (or already was) initialized,
        False when initialization failed for any other reason.
    """
    global _npu_initialized
    if _npu_initialized:
        return True

    try:
        import torch_npu  # noqa: F401 -- registers torch.npu
        torch.npu.set_device(NPU_NAME)
    except Exception as e:
        # Ascend reports "Repeated initialization" / code 100002 when the
        # device was already set up elsewhere; treat that as success.
        text = str(e)
        if "Repeated initialization" in text or "100002" in text:
            _npu_initialized = True
            return True
        print(f"[Warning] Failed to init NPU: {e}")
        return False

    _npu_initialized = True
    print(f"[Init] NPU initialized: {NPU_NAME}")
    return True


def ensure_npu_context():
    """Ensure the NPU context is set up (backward-compatible shim).

    Kept for older call sites; simply delegates to init_npu_once() and
    discards its boolean result.
    """
    init_npu_once()


# -----------------------------------------------------------
# 模型加载
# -----------------------------------------------------------
def load_model():
    """Load the HunyuanWorld-Mirror model and its helpers into globals.

    Populates:
        device         -- torch.device chosen by _select_device()
        model          -- WorldMirror network in eval mode, or None on failure
        skyseg_session -- onnxruntime sky-segmentation session, or None
    """
    global model, device, skyseg_session

    device = _select_device()

    # Make the project sources importable for the model class below.
    project_path = Path("/app/HunyuanWorld-Mirror")
    if project_path.exists():
        sys.path.insert(0, str(project_path))
        print(f"[Init] Added project path: {project_path}")

    model = _load_worldmirror(device)
    skyseg_session = _load_skyseg()


def _select_device():
    """Pick the compute device: NPU if available, else CUDA, else CPU."""
    try:
        import torch_npu  # noqa: F401 -- registers torch.npu
        if torch.npu.is_available():
            # Initialize the NPU once for the whole process.
            init_npu_once()
            print(f"[Init] Using NPU device: {NPU_NAME}")
            return torch.device(NPU_NAME)
        print("[Init] NPU not available, using CPU")
        return torch.device("cpu")
    except ImportError:
        if torch.cuda.is_available():
            print("[Init] Using CUDA device")
            return torch.device("cuda")
        print("[Init] Using CPU device")
        return torch.device("cpu")


def _load_worldmirror(target_device):
    """Load the WorldMirror checkpoint onto target_device.

    Prefers a local checkpoint (MODEL_PATH env var) and falls back to the
    HuggingFace hub. Returns the model in eval mode, or None on failure.
    """
    try:
        from src.models.models.worldmirror import WorldMirror

        model_path = os.getenv("MODEL_PATH", "/app/models/HunyuanWorld-Mirror")

        if os.path.exists(model_path) and os.path.isdir(model_path):
            print(f"[Init] Loading model from local path: {model_path}")
            net = WorldMirror.from_pretrained(model_path)
        else:
            print(f"[Init] Local model not found at {model_path}")
            print("[Init] Trying to download from HuggingFace: tencent/HunyuanWorld-Mirror")
            net = WorldMirror.from_pretrained("tencent/HunyuanWorld-Mirror")

        # float16 halves NPU memory usage; set USE_FP16=false if fp16
        # operators misbehave on the target device.
        use_fp16 = os.getenv("USE_FP16", "true").lower() == "true"

        if use_fp16:
            print(f"[Init] Converting model to float16 to save memory...")
            net = net.half()
        else:
            print(f"[Init] Converting model to float32...")
            net = net.float()

        net = net.to(target_device)
        net.eval()

        # Free temporary allocations made during loading/conversion.
        if 'npu' in str(target_device):
            torch.npu.empty_cache()
        gc.collect()

        print(f"[Init] Model loaded successfully on {target_device}, dtype={next(net.parameters()).dtype}")
        return net

    except Exception as e:
        print(f"[Warning] Could not load model: {e}")
        import traceback
        traceback.print_exc()
        return None


def _load_skyseg():
    """Load the optional ONNX sky-segmentation model; None when unavailable."""
    try:
        import onnxruntime
        skyseg_path = "/app/HunyuanWorld-Mirror/skyseg.onnx"
        if not os.path.exists(skyseg_path):
            skyseg_path = "skyseg.onnx"
        if os.path.exists(skyseg_path):
            session = onnxruntime.InferenceSession(skyseg_path)
            print(f"[Init] Sky segmentation model loaded")
            return session
        return None
    except Exception as e:
        print(f"[Warning] Could not load sky segmentation model: {e}")
        return None


# -----------------------------------------------------------
# 图片处理工具
# -----------------------------------------------------------
def encode_image_base64(image) -> str:
    """Encode an image as a base64 string.

    Accepts a file path (raw bytes are encoded as-is), a PIL image, or a
    numpy array (both serialized as PNG). Raises ValueError otherwise.
    """
    if isinstance(image, str):
        # Treat strings as file paths and encode the raw file contents.
        with open(image, 'rb') as fh:
            return base64.b64encode(fh.read()).decode('utf-8')

    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    if not isinstance(image, Image.Image):
        raise ValueError(f"Unsupported image type: {type(image)}")

    buf = io.BytesIO()
    image.save(buf, format='PNG')
    return base64.b64encode(buf.getvalue()).decode('utf-8')


def save_uploaded_files(files: List[UploadFile], task_dir: Path) -> List[Path]:
    """Persist uploads under ``task_dir/images`` and return the saved paths.

    Files are named input_000.ext, input_001.ext, ... in upload order; each
    upload's stream is rewound afterwards so callers may re-read it.
    """
    dest_dir = task_dir / "images"
    dest_dir.mkdir(parents=True, exist_ok=True)

    stored = []
    for index, upload in enumerate(files):
        ext = Path(upload.filename).suffix if upload.filename else ".jpg"
        target = dest_dir / f"input_{index:03d}{ext}"
        with open(target, 'wb') as out:
            shutil.copyfileobj(upload.file, out)
        upload.file.seek(0)  # rewind so the stream can be consumed again
        stored.append(target)
    return stored


def visualize_depth(depth: np.ndarray) -> np.ndarray:
    """Render a depth map as an RGB image using the inferno colormap."""
    span = depth.max() - depth.min()
    normalized = (depth - depth.min()) / (span + 1e-8)  # epsilon guards flat maps
    colored_bgr = cv2.applyColorMap((normalized * 255).astype(np.uint8), cv2.COLORMAP_INFERNO)
    # OpenCV works in BGR; convert to RGB for PIL/base64 consumers.
    return cv2.cvtColor(colored_bgr, cv2.COLOR_BGR2RGB)


def visualize_normal(normal: np.ndarray) -> np.ndarray:
    """Map unit-range normals ([-1, 1]) to displayable uint8 RGB ([0, 255])."""
    scaled = (normal + 1) / 2 * 255
    return scaled.astype(np.uint8)


# -----------------------------------------------------------
# 推理函数 (参考 app.py 的 run_model 函数)
# -----------------------------------------------------------
def run_inference(
    task_dir: Path,
    output_types: List[str],
    confidence_percentile: float = 10,
    edge_normal_threshold: float = 5.0,
    edge_depth_threshold: float = 0.03,
    apply_confidence_mask: bool = True,
    apply_edge_mask: bool = True,
) -> Dict[str, Any]:
    """Run 3D reconstruction over the images in ``task_dir/images``.

    Reference: https://github.com/Tencent-Hunyuan/HunyuanWorld-Mirror/blob/main/app.py

    Args:
        task_dir: Task working directory; reads ``images/`` and writes
            depth/normal PNGs and PLY files into it.
        output_types: Any of "depth", "normal", "pose", "pointmap", "splats".
        confidence_percentile: Per-view percentile (0-100); depth points below
            it are dropped when apply_confidence_mask is True.
        edge_normal_threshold: NOTE(review): accepted but never used in this
            function — edge masking is not implemented here.
        edge_depth_threshold: NOTE(review): unused, see above.
        apply_confidence_mask: Filter the point cloud by depth confidence.
        apply_edge_mask: NOTE(review): unused, see above.

    Returns:
        Dict with one entry per requested output type plus "status"
        ("success"/"error") and, on success, "num_views". On failure,
        "error" carries the exception text.
    """
    global model, device, skyseg_session

    ensure_npu_context()

    if model is None:
        return {"error": "Model not loaded", "status": "error"}

    outputs = {}

    try:
        # Project-local imports (resolvable once /app/HunyuanWorld-Mirror is
        # on sys.path). NOTE(review): depth_edge, normals_edge and
        # segment_sky are imported but never used below.
        from src.utils.inference_utils import load_and_preprocess_images
        from src.models.utils.geometry import depth_to_world_coords_points
        from src.utils.geometry import depth_edge, normals_edge
        from src.utils.visual_util import segment_sky
        from src.utils.save_utils import save_gs_ply

        # Collect the input images (sorted for deterministic view order).
        image_folder_path = task_dir / "images"
        image_file_paths = sorted([str(p) for p in image_folder_path.glob("*") if p.suffix.lower() in ['.jpg', '.jpeg', '.png', '.webp', '.heic']])

        if not image_file_paths:
            return {"error": "No valid images found", "status": "error"}

        # Cap the number of views to stay within NPU memory.
        max_images_for_memory = 4  # at most 4 images per run on the NPU
        if len(image_file_paths) > max_images_for_memory:
            print(f"[Warning] Too many images ({len(image_file_paths)}), limiting to {max_images_for_memory} for NPU memory")
            image_file_paths = image_file_paths[:max_images_for_memory]

        print(f"[Inference] Loading {len(image_file_paths)} images...")

        # Free cached memory before the large image tensor is allocated.
        if 'npu' in str(device):
            torch.npu.empty_cache()
        gc.collect()

        img = load_and_preprocess_images(image_file_paths).to(device)

        # Match the model's parameter dtype (fp16 when USE_FP16 was set).
        model_dtype = next(model.parameters()).dtype
        if model_dtype == torch.float16:
            img = img.half()
        print(f"[Inference] Input dtype: {img.dtype}")

        if img.shape[1] == 0:
            return {"error": "Failed to load images", "status": "error"}

        print(f"[Inference] Loaded {img.shape[1]} images, running inference...")

        # Model input dict; presumably img is (B, S, C, H, W) given the
        # permute below — TODO confirm against load_and_preprocess_images.
        inputs = {'img': img}

        # Mixed precision is disabled on the NPU (some operators, e.g.
        # scatter_add, do not support bfloat16); float32 is the safe default.
        use_amp = False
        amp_dtype = torch.float32

        device_type = 'npu' if 'npu' in str(device) else ('cuda' if 'cuda' in str(device) else 'cpu')

        # CUDA may use bf16 autocast; NPU stays full precision for now.
        if device_type == 'cuda':
            try:
                if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
                    use_amp = True
                    amp_dtype = torch.bfloat16
            except:  # NOTE(review): bare except kept as-is; best-effort probe
                pass

        # Run the forward pass.
        print(f"[Inference] Running on {device_type}, use_amp={use_amp}, dtype={amp_dtype}")

        # Drop cached NPU memory right before the forward pass.
        if device_type == 'npu':
            torch.npu.empty_cache()

        # no_grad keeps memory down; autocast is intentionally not used
        # (NPU float32 path does not support it). NOTE(review): use_amp is
        # computed above but not applied here.
        with torch.no_grad():
            predictions = model(inputs)

        # Images back to channels-last numpy for saving/coloring.
        imgs = inputs["img"].permute(0, 1, 3, 4, 2)[0].detach().cpu().numpy()  # S H W 3

        # Depth predictions and their confidence.
        depth_preds = predictions["depth"][0].detach().cpu().numpy()  # S H W 1
        depth_conf = predictions["depth_conf"][0].detach().cpu().numpy()  # S H W

        # Surface normals.
        normal_preds = predictions["normals"][0].detach().cpu().numpy()  # S H W 3

        # Camera extrinsics and intrinsics.
        camera_poses = predictions["camera_poses"][0].detach().cpu().numpy()  # S 4 4
        camera_intrs = predictions["camera_intrs"][0].detach().cpu().numpy()  # S 3 3

        # Unproject depth to world-space points using the predicted cameras.
        pts3d_preds = depth_to_world_coords_points(
            predictions["depth"][0, ..., 0],
            predictions["camera_poses"][0],
            predictions["camera_intrs"][0]
        )[0].detach().cpu().numpy()  # S H W 3

        num_views = imgs.shape[0]

        # Materialize each requested output type.
        if "depth" in output_types:
            depth_outputs = []
            for i in range(num_views):
                depth_vis = visualize_depth(depth_preds[i, :, :, 0])
                depth_path = task_dir / f"depth_{i:03d}.png"
                Image.fromarray(depth_vis).save(depth_path)
                depth_outputs.append({
                    "view": i,
                    "file": str(depth_path),
                    "base64": encode_image_base64(depth_vis)
                })
            outputs["depth"] = depth_outputs

        if "normal" in output_types:
            normal_outputs = []
            for i in range(num_views):
                normal_vis = visualize_normal(normal_preds[i])
                normal_path = task_dir / f"normal_{i:03d}.png"
                Image.fromarray(normal_vis).save(normal_path)
                normal_outputs.append({
                    "view": i,
                    "file": str(normal_path),
                    "base64": encode_image_base64(normal_vis)
                })
            outputs["normal"] = normal_outputs

        if "pose" in output_types:
            outputs["pose"] = {
                "camera_poses": camera_poses.tolist(),
                "camera_intrs": camera_intrs.tolist()
            }

        if "pointmap" in output_types:
            # Save the reconstructed point cloud as an ASCII PLY file.
            try:
                # Start from an all-true mask and optionally drop
                # low-confidence depth points per view.
                final_mask = np.ones(depth_conf.shape, dtype=bool)

                if apply_confidence_mask:
                    for i in range(num_views):
                        confidences = depth_conf[i]
                        percentile_threshold = np.quantile(confidences, confidence_percentile / 100.0)
                        final_mask[i] = final_mask[i] & (confidences >= percentile_threshold)

                # Write the masked, colored points.
                ply_path = task_dir / "pointcloud.ply"
                save_pointcloud_ply(
                    pts3d_preds, imgs, final_mask, str(ply_path)
                )
                outputs["pointmap"] = {
                    "file": str(ply_path),
                    "format": "ply",
                    "num_views": num_views
                }
            except Exception as e:
                print(f"[Warning] Failed to save pointcloud: {e}")
                outputs["pointmap"] = {"error": str(e)}

        if "splats" in output_types and "splats" in predictions:
            # Gaussian-splatting output (only when the model produced it).
            try:
                splats_path = task_dir / "splats.ply"
                save_gs_ply(predictions["splats"], str(splats_path))
                outputs["splats"] = {
                    "file": str(splats_path),
                    "format": "ply"
                }
            except Exception as e:
                print(f"[Warning] Failed to save splats: {e}")
                outputs["splats"] = {"error": str(e)}

        outputs["status"] = "success"
        outputs["num_views"] = num_views

        # Release device caches and Python garbage after a successful run.
        if 'npu' in str(device):
            torch.npu.empty_cache()
        elif 'cuda' in str(device):
            torch.cuda.empty_cache()
        gc.collect()

    except Exception as e:
        import traceback
        traceback.print_exc()
        outputs["error"] = str(e)
        outputs["status"] = "error"

    return outputs


def save_pointcloud_ply(points: np.ndarray, colors: np.ndarray, mask: np.ndarray, filepath: str):
    """Write a masked, colored point cloud to an ASCII PLY file.

    Args:
        points: (S, H, W, 3) world-space coordinates per view.
        colors: (S, H, W, 3) RGB colors, either 0-1 floats or 0-255 values.
        mask:   (S, H, W) boolean mask selecting the valid points.
        filepath: Destination .ply path.
    """
    valid_points = []
    valid_colors = []

    for i in range(points.shape[0]):
        valid_points.append(points[i][mask[i]])  # (N, 3)
        valid_colors.append(colors[i][mask[i]])  # (N, 3)

    all_points = np.concatenate(valid_points, axis=0)
    all_colors = np.concatenate(valid_colors, axis=0)

    # Scale 0-1 float colors up to 0-255. The size check guards against a
    # fully-masked (empty) selection, where .max() would raise ValueError.
    if all_colors.size and all_colors.max() <= 1.0:
        all_colors = (all_colors * 255).astype(np.uint8)

    with open(filepath, 'w') as f:
        f.write("ply\n")
        f.write("format ascii 1.0\n")
        f.write(f"element vertex {len(all_points)}\n")
        f.write("property float x\n")
        f.write("property float y\n")
        f.write("property float z\n")
        f.write("property uchar red\n")
        f.write("property uchar green\n")
        f.write("property uchar blue\n")
        f.write("end_header\n")

        # Batch the vertex lines into a single buffered write for speed.
        f.writelines(
            f"{pt[0]:.6f} {pt[1]:.6f} {pt[2]:.6f} {int(clr[0])} {int(clr[1])} {int(clr[2])}\n"
            for pt, clr in zip(all_points, all_colors)
        )


# -----------------------------------------------------------
# API 端点
# -----------------------------------------------------------

@app.get("/health")
async def health_check():
    """Liveness probe: reports model-load state and the active device."""
    device_label = str(device) if device else "unknown"
    return {
        "status": "ok",
        "model_loaded": model is not None,
        "device": device_label
    }


@app.get("/v1/models")
async def list_models():
    """List the models served by this API (OpenAI-style response shape)."""
    entry = {
        "id": "hunyuan-world-mirror",
        "object": "model",
        "created": int(time.time()),
        "owned_by": "tencent",
        "capabilities": ["pointmap", "depth", "normal", "pose", "splats"]
    }
    return {"object": "list", "data": [entry]}


@app.post("/v1/3d/reconstruct")
async def reconstruct_3d(
    files: List[UploadFile] = File(..., description="输入图片文件列表"),
    output_types: str = Form(default="pointmap,depth,normal", description="输出类型，逗号分隔: pointmap,depth,normal,pose,splats"),
    max_images: int = Form(default=8, description="最大处理图片数"),
    confidence_percentile: float = Form(default=10, description="置信度百分位阈值"),
):
    """
    3D reconstruction endpoint.

    Upload a set of images and receive 3D reconstruction results
    (point cloud, depth maps, normal maps, ...).

    **Supported output types:**
    - pointmap: point cloud reconstruction (PLY)
    - depth: depth estimation (PNG + base64)
    - normal: surface normal estimation (PNG + base64)
    - pose: camera pose estimation (4x4 matrices)
    - splats: Gaussian splatting (PLY)

    **Example:**
    ```bash
    curl -X POST http://localhost:18003/v1/3d/reconstruct \\
      -F "files=@image1.jpg" \\
      -F "files=@image2.jpg" \\
      -F "output_types=pointmap,depth,normal"
    ```
    """
    if not files:
        raise HTTPException(status_code=400, detail="No files uploaded")
    if model is None:
        raise HTTPException(status_code=503, detail="Model not loaded")

    # Cap the number of inputs to the requested limit.
    files = files[:max_images]

    # Per-request working directory keyed by a short task id.
    task_id = str(uuid.uuid4())[:8]
    task_dir = OUTPUT_DIR / task_id
    task_dir.mkdir(parents=True, exist_ok=True)

    started = time.time()

    try:
        saved = save_uploaded_files(files, task_dir)
        print(f"[API] Task {task_id}: Saved {len(saved)} images")

        requested = [part.strip() for part in output_types.split(",")]

        results = run_inference(
            task_dir,
            requested,
            confidence_percentile=confidence_percentile
        )

        elapsed = time.time() - started

        payload = {
            "task_id": task_id,
            "status": results.get("status", "unknown"),
            "outputs": {key: val for key, val in results.items() if key != "status"},
            "processing_time": round(elapsed, 2),
            "input_images": len(saved)
        }
        return JSONResponse(content=payload)

    except Exception as e:
        import traceback
        traceback.print_exc()
        elapsed = time.time() - started
        return JSONResponse(
            status_code=500,
            content={
                "task_id": task_id,
                "status": "error",
                "error": str(e),
                "processing_time": round(elapsed, 2)
            }
        )


@app.post("/v1/3d/depth")
async def estimate_depth(
    file: UploadFile = File(..., description="输入图片")
):
    """
    Single-image depth estimation.

    **Example:**
    ```bash
    curl -X POST http://localhost:18003/v1/3d/depth \\
      -F "file=@image.jpg"
    ```
    """
    if model is None:
        raise HTTPException(status_code=503, detail="Model not loaded")

    task_id = str(uuid.uuid4())[:8]
    workdir = OUTPUT_DIR / task_id
    workdir.mkdir(parents=True, exist_ok=True)

    started = time.time()

    try:
        save_uploaded_files([file], workdir)
        result = run_inference(workdir, ["depth"])

        depth_views = result.get("depth")
        return {
            "task_id": task_id,
            "status": result.get("status", "unknown"),
            "depth": depth_views[0] if depth_views else {},
            "processing_time": round(time.time() - started, 2)
        }
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={"error": str(e)}
        )


@app.post("/v1/3d/normal")
async def estimate_normal(
    file: UploadFile = File(..., description="输入图片")
):
    """
    Single-image surface normal estimation.

    **Example:**
    ```bash
    curl -X POST http://localhost:18003/v1/3d/normal \\
      -F "file=@image.jpg"
    ```
    """
    if model is None:
        raise HTTPException(status_code=503, detail="Model not loaded")

    task_id = str(uuid.uuid4())[:8]
    workdir = OUTPUT_DIR / task_id
    workdir.mkdir(parents=True, exist_ok=True)

    started = time.time()

    try:
        save_uploaded_files([file], workdir)
        result = run_inference(workdir, ["normal"])

        normal_views = result.get("normal")
        return {
            "task_id": task_id,
            "status": result.get("status", "unknown"),
            "normal": normal_views[0] if normal_views else {},
            "processing_time": round(time.time() - started, 2)
        }
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={"error": str(e)}
        )


@app.get("/v1/3d/output/{task_id}/{filename}")
async def get_output_file(task_id: str, filename: str):
    """Download a generated output file for a task.

    Fix: the route previously lacked the ``{filename}`` path parameter, so
    FastAPI could never bind the ``filename`` argument. The resolved path is
    also confined to OUTPUT_DIR to block path-traversal via crafted names.
    """
    file_path = (OUTPUT_DIR / task_id / filename).resolve()
    # Reject anything (e.g. "..%2F" tricks) that escapes the output root.
    if not str(file_path).startswith(str(OUTPUT_DIR.resolve()) + os.sep):
        raise HTTPException(status_code=404, detail="File not found")
    if not file_path.exists():
        raise HTTPException(status_code=404, detail="File not found")
    return FileResponse(file_path)


# -----------------------------------------------------------
# 启动时加载模型
# -----------------------------------------------------------
@app.on_event("startup")
async def startup_event():
    # Load the model once at server start; endpoints respond with 503
    # until the global `model` is populated.
    load_model()


# -----------------------------------------------------------
# 运行
# -----------------------------------------------------------
if __name__ == "__main__":
    # NOTE(review): docstring examples call port 18003 while the server
    # binds 8000 — presumably remapped by a container/proxy; confirm the
    # deployment mapping.
    uvicorn.run(app, host="0.0.0.0", port=8000)
