import os
import time
import base64
from io import BytesIO
from abc import ABC, abstractmethod

import numpy as np
import requests
import cv2
import torch

__VERSION__ = "v1.1.0"

class BaseImageGeneratorNode(ABC):
    """Base class for image-generator nodes.

    Provides the shared ComfyUI node metadata plus helpers for converting
    between ComfyUI image tensors (float32, (1, H, W, 3), RGB in [0, 1])
    and base64/URL image payloads.
    """

    CATEGORY = "AmorForge"
    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("generated_image",)
    OUTPUT_NODE = False
    FUNCTION = "generate_image"

    # Seconds to wait on outbound HTTP requests. `requests` has no default
    # timeout, so without this a stalled server hangs the node forever.
    REQUEST_TIMEOUT = 60

    def __init__(self):
        """Initialize shared attributes; subclasses populate them."""
        self.api_key = None
        self.model_name = None

    @classmethod
    def INPUT_TYPES(cls):
        """Define input parameters; subclasses should override this."""
        return {
            "required": {},
            "optional": {}
        }

    @abstractmethod
    def generate_image(self, **kwargs):
        """Abstract method; subclasses implement the actual generation logic."""
        pass

    def tensor_to_base64(self, image_tensor):
        """Convert a ComfyUI image tensor to a base64-encoded JPEG string.

        Args:
            image_tensor: (B, H, W, 3) float tensor in RGB, values in [0, 1];
                only the first image in the batch is encoded.

        Returns:
            The JPEG bytes encoded as a UTF-8 base64 string.

        Raises:
            Exception: if JPEG encoding fails.
        """
        image_np = image_tensor[0].cpu().numpy()
        # Clip before the uint8 cast: tensors can slightly exceed [0, 1],
        # and an unclipped cast would wrap around instead of saturating.
        image_np = np.clip(image_np * 255.0, 0, 255).astype(np.uint8)
        # ComfyUI tensors are RGB, but cv2.imencode expects BGR; without
        # this conversion the JPEG would have red/blue channels swapped.
        image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
        success, buffer = cv2.imencode('.jpg', image_np)
        if not success:
            raise Exception("Failed to encode image tensor as JPEG")
        return base64.b64encode(buffer).decode('utf-8')

    def base64_to_tensor(self, base64_data):
        """Convert base64-encoded image data to a ComfyUI image tensor.

        Args:
            base64_data: Base64 string holding an encoded image (JPEG/PNG/...).

        Returns:
            (1, H, W, 3) float32 tensor in RGB, values in [0, 1].

        Raises:
            Exception: if the payload cannot be decoded as an image.
        """
        raw = np.frombuffer(base64.b64decode(base64_data), np.uint8)
        image = cv2.imdecode(raw, cv2.IMREAD_COLOR)
        if image is None:
            # imdecode signals failure by returning None, not by raising.
            raise Exception("Failed to decode base64 image data")
        # cv2.imdecode yields BGR; ComfyUI expects RGB.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image_np = image.astype(np.float32) / 255.0
        return torch.from_numpy(image_np)[None,]

    def download_image_to_tensor(self, image_url):
        """Download an image from a URL and convert it to a ComfyUI tensor.

        Args:
            image_url: HTTP(S) URL of the image to fetch.

        Returns:
            (1, H, W, 3) float32 tensor in RGB, values in [0, 1].

        Raises:
            Exception: on a non-200 response or undecodable image content.
        """
        image_response = requests.get(image_url, timeout=self.REQUEST_TIMEOUT)
        if image_response.status_code != 200:
            raise Exception(f"Failed to download image from URL. Status code: {image_response.status_code}")

        image = cv2.imdecode(np.frombuffer(image_response.content, np.uint8), cv2.IMREAD_COLOR)
        if image is None:
            raise Exception("Failed to decode downloaded image data")
        # cv2.imdecode yields BGR; ComfyUI expects RGB.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image_np = image.astype(np.float32) / 255.0
        return torch.from_numpy(image_np)[None,]

class CogViewImageGeneratorNode(BaseImageGeneratorNode):
    """ComfyUI node that generates images with ZhipuAI's CogView models."""

    # Seconds to wait for the generation request. `requests` has no default
    # timeout, so without this a stalled server would hang the node forever.
    REQUEST_TIMEOUT = 120

    def __init__(self):
        super().__init__()
        # The ZhipuAI key is read from the OpenAI-compatible env var name.
        self.api_key = os.environ.get("OPENAI_API_KEY")
        self.url = "https://open.bigmodel.cn/api/paas/v4/images/generations"
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        } if self.api_key else {}

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the node's inputs: model choice, prompt, size, and seed."""
        return {
            "required": {
                "model": (["cogview-3-flash", "cogview-4-250304", "cogview-4"],),
                "prompt": ("STRING", {"multiline": True, "default": "a girl"}),
                "size": (["1024x1024", "768x1344", "864x1152", "1344x768", "1152x864", "1440x720", "720x1440"],),
                "seed": ("INT", {"default": 65536, "min": 0, "max": 1000000}),
            }
        }

    def generate_image(self, model, prompt, size, seed):
        """Generate an image via the ZhipuAI CogView API.

        Args:
            model: CogView model identifier.
            prompt: Text prompt describing the desired image.
            size: Resolution string, e.g. "1024x1024".
            seed: Random seed forwarded to the API.

        Returns:
            1-tuple containing the generated image as a ComfyUI tensor.

        Raises:
            Exception: on missing API key, HTTP failure, unparseable JSON,
                or a response carrying no usable image URL.
        """
        if not self.api_key:
            error_msg = "OPENAI_API_KEY environment variable not set. Please set your OpenAI API key."
            print(f"[CogViewImageGeneratorNode] Error: {error_msg}")
            raise Exception(error_msg)

        payload = {
            "model": model,
            "prompt": prompt,
            "size": size,
            "seed": seed
        }

        # The CogView endpoint is not OpenAI-compatible, so it is called
        # with `requests` directly instead of an OpenAI client.
        response = requests.post(self.url, headers=self.headers, json=payload,
                                 timeout=self.REQUEST_TIMEOUT)
        if response.status_code != 200:
            error_msg = f"API request failed with status code {response.status_code}: {response.text}"
            print(f"[CogViewImageGeneratorNode] Error: {error_msg}")
            raise Exception(error_msg)

        try:
            response_data = response.json()
        except Exception as e:
            error_msg = f"Failed to parse JSON response: {str(e)}. Response content: {response.text}"
            print(f"[CogViewImageGeneratorNode] Error: {error_msg}")
            raise Exception(error_msg)

        # Guard both a missing/empty "data" list and a missing "url" key,
        # so callers get a clear error instead of a bare KeyError.
        if "data" not in response_data or not response_data["data"]:
            error_msg = "No image data in response"
            print(f"[CogViewImageGeneratorNode] Error: {error_msg}")
            raise Exception(error_msg)

        image_url = response_data["data"][0].get("url")
        if not image_url:
            error_msg = "No image data in response"
            print(f"[CogViewImageGeneratorNode] Error: {error_msg}")
            raise Exception(error_msg)

        print(f"[CogViewImageGeneratorNode] Image generated successfully. URL: {image_url}")

        # Download the image and convert to tensor using the base class helper.
        image_tensor = self.download_image_to_tensor(image_url)
        return (image_tensor,)

class GeminiImageGeneratorNode(BaseImageGeneratorNode):
    """ComfyUI node that generates images with Google's Gemini image model.

    Supports up to two optional reference images, forwarded to the API as
    base64-encoded JPEGs.
    """

    def __init__(self):
        super().__init__()
        # Local import so the Gemini SDK wrapper is only required when this
        # node is actually instantiated.
        from .tools.util_gemini_sdk import GeminiModelApi

        self.gemini_api = GeminiModelApi()
        self.gemini_api.connect(model_tag="gemini-2.5-flash-image-preview")

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the node's inputs: prompt/size/seed plus two optional
        reference images."""
        return {
            "required": {
                "prompt": ("STRING", {"multiline": True, "default": "a girl"}),
                "size": (["1024x1024", "768x1344", "864x1152", "1344x768", "1152x864", "1440x960", "960x1440"],),
                "seed": ("INT", {"default": 65536, "min": 0, "max": 1000000}),
            },
            "optional": {
                "image1": ("IMAGE",),
                "image2": ("IMAGE",),
            }
        }

    def generate_image(self, prompt, size, seed, image1=None, image2=None):
        """Generate an image with Gemini and return it as a ComfyUI tensor.

        Args:
            prompt: Text prompt describing the desired image.
            size: Resolution string, e.g. "1024x1024".
            seed: Random seed forwarded to the API.
            image1: Optional ComfyUI IMAGE tensor used as a reference.
            image2: Optional second reference image tensor.

        Returns:
            1-tuple containing the generated image as a ComfyUI tensor.

        Raises:
            Exception: if the API is unavailable or returns no image data.
        """
        if not self.gemini_api:
            error_msg = "Gemini API not initialized. Please check your GEMINI_API_KEY environment variable."
            print(f"[GeminiImageGeneratorNode] Error: {error_msg}")
            raise Exception(error_msg)

        # Convert every provided reference tensor to base64 using the base
        # class helper; a single comprehension handles both slots.
        reference_images = [
            self.tensor_to_base64(image)
            for image in (image1, image2)
            if image is not None
        ]

        # Omit the reference argument entirely when there are none, so the
        # SDK's own default for the no-reference case is preserved.
        if reference_images:
            print(f"[GeminiImageGeneratorNode] Using {len(reference_images)} reference images")
            response = self.gemini_api.generate_image(prompt, size, seed, reference_images)
        else:
            response = self.gemini_api.generate_image(prompt, size, seed)

        # Check that we have actual image data before decoding it.
        if not response.get("image_data"):
            error_msg = "No image data in response"
            print(f"[GeminiImageGeneratorNode] Error: {error_msg}")
            raise Exception(error_msg)

        # Decode the returned base64 payload using the base class helper.
        image_data = response["image_data"]
        image_tensor = self.base64_to_tensor(image_data["data"])
        print(f"[GeminiImageGeneratorNode] Generated image tensor with shape {tuple(image_tensor.shape)}")
        return (image_tensor,)
