import os
import json
import shutil
import hashlib
import base64
from io import BytesIO
from PIL import Image
from server import PromptServer
from aiohttp import web
import folder_paths
import numpy as np
import torch

# Global variables to store the most recent captured images.
# Written by the /threejs_capture_image and /threejs_capture_depth routes
# below; read by ThreeDModelViewer.load_model when the node executes.
latest_captured_image = None  # torch.Tensor (1, H, W, 3) float32 in [0,1], or None before first capture
latest_depth_image = None  # torch.Tensor (1, H, W, 3) float32 in [0,1], or None before first capture


class ThreeDModelViewer:
    """ComfyUI node that shows a GLB/GLTF model in a browser-side Three.js
    viewer and returns the most recently captured render and depth map.

    The node does no 3D rendering itself: it publishes the model path and
    camera settings to the frontend through the ``ui`` payload, and the
    frontend posts captured images back via the ``/threejs_capture_*``
    routes, which store them in the module-level globals read here.
    """

    @classmethod
    def INPUT_TYPES(s):
        """Declare the node's input widgets for the ComfyUI frontend."""

        def _float(default, lo, hi, step):
            # Every FLOAT input shares the same widget shape; only the
            # default/range/step differ, so build the tuple in one place.
            return (
                "FLOAT",
                {
                    "default": default,
                    "min": lo,
                    "max": hi,
                    "step": step,
                    "display": "number",
                },
            )

        def _coord():
            # Camera / look-at coordinate: symmetric +/-50 range, 0.1 step.
            return _float(0.0, -50.0, 50.0, 0.1)

        return {
            "required": {
                "model_path": ("STRING", {"default": "", "multiline": False}),
                "canvas_width": ("INT", {"default": 512, "min": 512, "max": 2048}),
                "canvas_height": ("INT", {"default": 512, "min": 512, "max": 2048}),
                "camera_x": _coord(),
                "camera_y": _coord(),
                "camera_z": _coord(),
                "lookat_x": _coord(),
                "lookat_y": _coord(),
                "lookat_z": _coord(),
                "camera_near": _float(0.1, 0.01, 10.0, 0.01),
                "camera_far": _float(10.0, 1.0, 100.0, 1.0),
                "focal_length": _float(50.0, 10.0, 200.0, 1.0),
            },
        }

    RETURN_TYPES = ("IMAGE", "IMAGE")
    RETURN_NAMES = ("rendered_image", "depth_map")
    FUNCTION = "load_model"
    # NOTE(review): "aesethtics" looks like a typo for "aesthetics", but
    # renaming the category moves the node in the UI menu, so it is kept.
    CATEGORY = "aesethtics/3D"
    OUTPUT_NODE = False

    def _blank_image(self):
        """Return a black placeholder in ComfyUI IMAGE layout (B, H, W, C)."""
        return torch.zeros((1, 512, 512, 3), dtype=torch.float32)

    def _error_result(self, message):
        """Build an error result: *message* for the UI, blank images for the
        outputs.

        BUG FIX: the original returned raw strings / ``None`` in ``result``
        even though the node declares ``RETURN_TYPES = ("IMAGE", "IMAGE")``,
        which breaks any downstream node expecting image tensors. Blank
        tensors keep the declared contract on every path.
        """
        blank = self._blank_image()
        return {"ui": {"text": [message]}, "result": (blank, blank)}

    def load_model(
        self,
        model_path,
        canvas_width,
        canvas_height,
        camera_x,
        camera_y,
        camera_z,
        lookat_x,
        lookat_y,
        lookat_z,
        camera_near,
        camera_far,
        focal_length,
    ):
        """Validate *model_path*, publish viewer config to the frontend, and
        return the latest captured render/depth tensors.

        Returns blank 512x512 placeholders until the frontend has posted a
        capture, since captures arrive asynchronously over HTTP.
        """
        if not model_path:
            return self._error_result("No model path provided")
        if not os.path.exists(model_path):
            return self._error_result(f"Model file not found: {model_path}")
        # str.endswith accepts a tuple — no need for any()/generator here.
        if not model_path.lower().endswith((".glb", ".gltf")):
            return self._error_result(
                "Invalid file format. Only GLB and GLTF files are supported."
            )

        web_url = self.create_web_accessible_url(model_path)
        model_info = {
            "path": model_path,
            "web_url": web_url,
            "width": canvas_width,
            "height": canvas_height,
            "filename": os.path.basename(model_path),
            "camera": {"x": camera_x, "y": camera_y, "z": camera_z},
            "lookat": {"x": lookat_x, "y": lookat_y, "z": lookat_z},
            "near": camera_near,
            "far": camera_far,
            "focal_length": focal_length,
        }

        # Fall back to blank placeholders when nothing has been captured yet.
        blank = self._blank_image()
        captured = latest_captured_image if latest_captured_image is not None else blank
        depth = latest_depth_image if latest_depth_image is not None else blank

        return {
            "ui": {"threejs_viewer": [model_info]},
            "result": (captured, depth),
        }

    def create_web_accessible_url(self, model_path):
        """Copy *model_path* into ``web/temp_models`` and return the URL path
        it is served from by the ``/temp_models/{filename}`` route.

        The short md5 prefix disambiguates same-named files from different
        source directories; it is used for naming only, not for security.
        """
        temp_dir = os.path.join(folder_paths.base_path, "web", "temp_models")
        os.makedirs(temp_dir, exist_ok=True)
        file_hash = hashlib.md5(model_path.encode()).hexdigest()[:8]
        filename = os.path.basename(model_path)
        # BUG FIX: the original dropped the filename from the temp name
        # (broken placeholder), producing URLs that never matched any file.
        temp_filename = f"{file_hash}_{filename}"
        temp_path = os.path.join(temp_dir, temp_filename)
        if not os.path.exists(temp_path):
            shutil.copy2(model_path, temp_path)
        return f"/temp_models/{temp_filename}"


# Registration tables that ComfyUI scans when importing this module:
# internal node id -> implementing class, and id -> menu display name.
NODE_CLASS_MAPPINGS = dict(ThreeDModelViewer=ThreeDModelViewer)
NODE_DISPLAY_NAME_MAPPINGS = dict(ThreeDModelViewer="3D Model Viewer")


# Serve temp models copied into web/temp_models so the browser viewer can load them.
@PromptServer.instance.routes.get("/temp_models/{filename}")
async def serve_temp_model(request):
    """Return the requested staged model file, or 404 if it does not exist.

    BUG FIX: the route pattern contained a broken placeholder instead of the
    aiohttp ``{filename}`` variable segment, so ``match_info["filename"]``
    could never be populated.

    SECURITY: the client-supplied filename is reduced to its basename before
    being joined into the directory path, so a crafted request (e.g.
    ``..%2F..%2Fsomething``) cannot escape the temp_models directory.
    """
    filename = os.path.basename(request.match_info["filename"])
    temp_dir = os.path.join(folder_paths.base_path, "web", "temp_models")
    file_path = os.path.join(temp_dir, filename)
    # isfile (not exists) so a directory name cannot be handed to FileResponse.
    if not os.path.isfile(file_path):
        raise web.HTTPNotFound()
    return web.FileResponse(file_path)


# Prepare model file for viewing
@PromptServer.instance.routes.post("/threejs_prepare_model")
async def prepare_model(request):
    """Stage a local GLB/GLTF file into ``web/temp_models`` and return its URL.

    Mirrors ``ThreeDModelViewer.create_web_accessible_url`` so the frontend
    can load a model before the node executes. Always responds with a
    ``{"success": bool, ...}`` JSON body instead of raising.
    """
    data = await request.json()
    model_path = data.get("model_path", "")

    if not model_path:
        return web.json_response({"success": False, "error": "No model path provided"})

    if not os.path.exists(model_path):
        return web.json_response({"success": False, "error": "Model file not found"})

    # Validate file format before touching the filesystem.
    if not model_path.lower().endswith((".glb", ".gltf")):
        return web.json_response(
            {
                "success": False,
                "error": "Invalid file format. Only GLB and GLTF files are supported.",
            }
        )

    # Same naming scheme as create_web_accessible_url: a short md5 prefix
    # disambiguates same-named files from different source directories.
    temp_dir = os.path.join(folder_paths.base_path, "web", "temp_models")
    os.makedirs(temp_dir, exist_ok=True)
    file_hash = hashlib.md5(model_path.encode()).hexdigest()[:8]
    filename = os.path.basename(model_path)
    # BUG FIX: the original dropped the filename from the temp name (broken
    # placeholder), so prepared URLs never matched the files being served.
    temp_filename = f"{file_hash}_{filename}"
    temp_path = os.path.join(temp_dir, temp_filename)

    if not os.path.exists(temp_path):
        try:
            shutil.copy2(model_path, temp_path)
        except OSError as e:
            # Copy failures (permissions, disk full, ...) are reported to the
            # client rather than surfacing as a 500.
            return web.json_response(
                {"success": False, "error": f"Failed to copy file: {str(e)}"}
            )

    return web.json_response(
        {
            "success": True,
            "web_url": f"/temp_models/{temp_filename}",
            "filename": filename,
            "temp_filename": temp_filename,
        }
    )


# Capture rendered image from Three.js canvas
@PromptServer.instance.routes.post("/threejs_capture_image")
async def capture_image(request):
    """Store a PNG data-URL posted by the frontend as the latest render.

    The decoded image is kept in ``latest_captured_image`` as a float32
    tensor in ComfyUI IMAGE layout, for ``load_model`` to return later.
    """
    global latest_captured_image

    try:
        payload = await request.json()
        data_url = payload.get("dataURL", "")

        prefix = "data:image/png;base64,"
        if not data_url.startswith(prefix):
            return web.json_response(
                {"success": False, "error": "Invalid dataURL format"}
            )

        # Everything after the data-URL prefix is the base64 payload.
        raw_bytes = base64.b64decode(data_url[len(prefix):])
        picture = Image.open(BytesIO(raw_bytes)).convert("RGB")

        # ComfyUI IMAGE layout: float32 in [0, 1], shape (1, H, W, C).
        pixels = np.array(picture).astype(np.float32) / 255.0
        latest_captured_image = torch.from_numpy(pixels).unsqueeze(0)

        return web.json_response({"success": True})

    except Exception as e:
        # Report decode/parse failures to the client instead of a 500.
        return web.json_response({"success": False, "error": str(e)})


# Capture depth map from Three.js canvas
@PromptServer.instance.routes.post("/threejs_capture_depth")
async def capture_depth(request):
    """Store a PNG data-URL posted by the frontend as the latest depth map.

    The decoded image is kept in ``latest_depth_image`` as a float32 tensor
    in ComfyUI IMAGE layout, for ``load_model`` to return later.
    """
    global latest_depth_image

    try:
        body = await request.json()
        url = body.get("dataURL", "")

        if not url.startswith("data:image/png;base64,"):
            return web.json_response(
                {"success": False, "error": "Invalid dataURL format"}
            )

        # Split off the base64 payload after the data-URL header comma.
        _, encoded = url.split(",", 1)
        depth_png = Image.open(BytesIO(base64.b64decode(encoded))).convert("RGB")

        arr = np.array(depth_png).astype(np.float32) / 255.0
        # (H, W, 3) -> (1, H, W, 3): ComfyUI expects a leading batch axis.
        latest_depth_image = torch.from_numpy(arr)[None, ...]

        return web.json_response({"success": True})

    except Exception as exc:
        # Report decode/parse failures to the client instead of a 500.
        return web.json_response({"success": False, "error": str(exc)})
