"""
S4 Custom Image with Gemini Node
Author: S4MUEL
GitHub: https://github.com/S4MUEL-404/ComfyUI-S4API

Custom implementation of a Gemini image-generation node that lets users supply
their own API key and does not display chat history.
"""

import json
import base64
import requests
from io import BytesIO
from typing import Optional, List, Dict, Any
import torch
from PIL import Image
import numpy as np
import os
import hashlib


class S4ImageWithGemini:
    """
    Custom image-generation node using the Google Gemini API.

    The user supplies their own API key; results are cached on disk
    (keyed by prompt, model and seed) and no chat history is displayed.
    """

    # Single source of truth for the model name (used in both the cache key
    # and the request URL; previously duplicated as a literal in two methods).
    MODEL = "gemini-2.5-flash-image-preview"

    def __init__(self):
        # Ensure cache directory exists (created as <package root>/cache).
        self.cache_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "cache")
        os.makedirs(self.cache_dir, exist_ok=True)

    @classmethod
    def INPUT_TYPES(cls):
        """ComfyUI input schema: required prompt/api_key/seed, optional reference images."""
        return {
            "required": {
                "prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Text prompt for image generation"
                    }
                ),
                "api_key": (
                    "STRING",
                    {
                        "multiline": False,
                        "default": "",
                        "tooltip": "Your Google Gemini API Key"
                    }
                ),
                "seed": (
                    "INT",
                    {
                        "default": 42,
                        "min": 0,
                        "max": 0xFFFFFFFFFFFFFFFF,
                        "step": 1,
                        "display": "number",
                        "control_after_generate": True,
                        "tooltip": "Seed for generation consistency"
                    }
                ),
            },
            "optional": {
                "images": (
                    "IMAGE",
                    {
                        "default": None,
                        "tooltip": "Optional reference images for context"
                    }
                ),
            }
        }

    RETURN_TYPES = ("IMAGE", "STRING")
    RETURN_NAMES = ("image", "text")
    FUNCTION = "generate_image"
    CATEGORY = "💀PromptsO"
    DESCRIPTION = "Generate images using Google Gemini API with your own API key"

    def validate_inputs(self, prompt: str, api_key: str) -> None:
        """Validate inputs.

        Raises:
            ValueError: if the prompt or API key is empty or whitespace-only.
        """
        if not prompt or not prompt.strip():
            raise ValueError("Prompt cannot be empty")
        if not api_key or not api_key.strip():
            raise ValueError("API key cannot be empty")

    def generate_cache_key(self, prompt: str, seed: int, images: Optional[torch.Tensor] = None) -> str:
        """
        Build a deterministic cache key from the stripped prompt, model name,
        seed and (optionally) a cheap fingerprint of the reference images.
        MD5 is used for cache addressing only, not security.
        """
        cache_data = {
            "prompt": prompt.strip(),
            "model": self.MODEL,
            "seed": seed
        }

        if images is not None:
            # Cheap fingerprint: tensor shape + element sum. NOTE(review):
            # distinct images can collide (e.g. permuted pixels); acceptable
            # for a best-effort cache, not for exact identity.
            image_hash = hashlib.md5(str(images.shape).encode() + str(images.sum().item()).encode()).hexdigest()[:8]
            cache_data["images_hash"] = image_hash

        cache_str = json.dumps(cache_data, sort_keys=True)
        return hashlib.md5(cache_str.encode()).hexdigest()

    def load_from_cache(self, cache_key: str) -> Optional[tuple]:
        """Return (image_tensor, text) from the on-disk cache, or None on miss/error."""
        cache_file = os.path.join(self.cache_dir, f"{cache_key}_gemini_img.json")
        if os.path.exists(cache_file):
            try:
                with open(cache_file, 'r', encoding='utf-8') as f:
                    cache_data = json.load(f)
                # The image tensor is stored separately as a .pt file.
                img_file = os.path.join(self.cache_dir, f"{cache_key}_gemini_img.pt")
                if os.path.exists(img_file):
                    image_tensor = torch.load(img_file, weights_only=True)
                    return (image_tensor, cache_data.get("text", ""))
            except Exception as e:
                # Cache is best-effort: a corrupt entry falls through to regeneration.
                print(f"[S4API] ⚠️ Failed to load cache: {e}")
        return None

    def save_to_cache(self, cache_key: str, image_tensor: torch.Tensor, text: str) -> None:
        """Persist the text (JSON metadata) and image tensor (.pt) for a cache key; best-effort."""
        try:
            cache_file = os.path.join(self.cache_dir, f"{cache_key}_gemini_img.json")
            # Fix: the old code stored json.dumps({"time": "cached"}) — a doubly
            # JSON-encoded, meaningless value. Only "text" is ever read back, so
            # a plain marker string is sufficient.
            cache_data = {
                "text": text,
                "timestamp": "cached"
            }
            with open(cache_file, 'w', encoding='utf-8') as f:
                json.dump(cache_data, f, ensure_ascii=False, indent=2)

            img_file = os.path.join(self.cache_dir, f"{cache_key}_gemini_img.pt")
            torch.save(image_tensor, img_file)
        except Exception as e:
            print(f"[S4API] ⚠️ Failed to save cache: {e}")

    def encode_image_to_base64(self, image_tensor: torch.Tensor) -> str:
        """
        Encode a channels-last RGB tensor ([H, W, 3] or [1, H, W, 3], values
        expected in [0, 1]) as a base64 PNG string.

        Raises:
            ValueError: if the last dimension is not 3 (non-RGB input).
        """
        if len(image_tensor.shape) == 4:
            image_tensor = image_tensor[0]  # drop batch dimension

        if image_tensor.shape[-1] == 3:  # RGB, channels-last — no transpose needed for PIL
            # Fix: clamp before the uint8 cast; values outside [0, 1] would
            # otherwise wrap around and visibly corrupt the encoded image.
            image_np = (image_tensor.clamp(0.0, 1.0).cpu().numpy() * 255).astype(np.uint8)
            image_pil = Image.fromarray(image_np)
        else:
            raise ValueError("Unsupported image format")

        buffer = BytesIO()
        image_pil.save(buffer, format="PNG")
        return base64.b64encode(buffer.getvalue()).decode("utf-8")

    def decode_base64_to_image(self, base64_str: str) -> torch.Tensor:
        """Decode a base64 image into a float [1, H, W, 3] tensor in [0, 1].

        Never raises: returns a black 1024x1024 placeholder on decode failure.
        """
        try:
            image_data = base64.b64decode(base64_str)
            image_pil = Image.open(BytesIO(image_data))

            # Normalize palette/greyscale/alpha modes to plain RGB.
            if image_pil.mode != "RGB":
                image_pil = image_pil.convert("RGB")

            image_np = np.array(image_pil, dtype=np.float32) / 255.0
            return torch.from_numpy(image_np).unsqueeze(0)  # add batch dimension

        except Exception as e:
            # Fix: log the failure (previously swallowed silently) while keeping
            # the best-effort contract of returning a placeholder tensor.
            print(f"[S4API] ⚠️ Failed to decode image: {e}")
            return torch.zeros((1, 1024, 1024, 3))

    def create_text_image(self, text: str) -> torch.Tensor:
        """
        Render a gradient placeholder image with a red-tinted central "text box",
        used when the API returns text but no image data.
        """
        try:
            width, height = 512, 512
            # Fix: the original per-pixel Python double loop did ~260k
            # interpreter iterations; broadcasting computes the identical
            # gradient values in a handful of tensor ops.
            xs = torch.arange(width, dtype=torch.float32)
            ys = torch.arange(height, dtype=torch.float32)
            image = torch.empty((1, height, width, 3))
            image[0, :, :, 0] = (0.3 + 0.4 * (xs / width)).unsqueeze(0).expand(height, width)
            image[0, :, :, 1] = (0.2 + 0.5 * (ys / height)).unsqueeze(1).expand(height, width)
            image[0, :, :, 2] = 0.6 + 0.3 * ((xs.unsqueeze(0) + ys.unsqueeze(1)) / (width + height))

            # Darken and red-tint a central 200x100 box to suggest a text area.
            cx, cy = width // 2, height // 2
            image[0, cy - 50:cy + 50, cx - 100:cx + 100, :] *= 0.8
            image[0, cy - 50:cy + 50, cx - 100:cx + 100, 0] += 0.2

            return image

        except Exception:
            # Fallback: plain warm-colored rectangle.
            return torch.ones((1, 512, 512, 3)) * torch.tensor([0.7, 0.5, 0.3])

    def prepare_request_payload(self, prompt: str, images: Optional[torch.Tensor] = None) -> Dict[str, Any]:
        """Build the generateContent payload: the text part first, then optional reference images."""
        parts = [{"text": prompt}]

        if images is not None:
            # Normalize a single [H, W, 3] image into a batch of one so a single
            # loop covers both cases (removes the duplicated dict construction).
            batch = images if len(images.shape) == 4 else images.unsqueeze(0)
            print(f"[S4API] 🖼️ Adding {batch.shape[0]} reference image(s)")
            for i in range(batch.shape[0]):
                parts.append({
                    "inlineData": {
                        "mimeType": "image/png",
                        "data": self.encode_image_to_base64(batch[i])
                    }
                })

        return {
            "contents": [
                {
                    "role": "user",
                    "parts": parts
                }
            ],
            "generationConfig": {
                "responseModalities": ["TEXT", "IMAGE"],
                "temperature": 0.4,  # lower temperature for more consistent image generation
                "topK": 32,
                "topP": 0.95
            }
        }

    def call_gemini_api(self, api_key: str, payload: Dict[str, Any]) -> Dict[str, Any]:
        """
        POST the payload to the Gemini generateContent endpoint and return the
        parsed JSON response.

        Raises:
            Exception: on non-200 status, network failure, or undecodable JSON.
        """
        model = self.MODEL
        url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={api_key}"

        headers = {
            "Content-Type": "application/json"
        }

        print(f"[S4API] 🚀 Starting image generation with {model}...")
        # Safe: prepare_request_payload always places the text part first.
        print(f"[S4API] 📝 Prompt: {payload['contents'][0]['parts'][0]['text'][:100]}...")

        try:
            print(f"[S4API] 🌐 Sending request to Gemini API...")
            response = requests.post(url, headers=headers, json=payload, timeout=120)

            if response.status_code == 200:
                result = response.json()
                print(f"[S4API] ✅ Request successful!")

                # Log token usage information when the API reports it.
                if "usageMetadata" in result:
                    usage = result["usageMetadata"]
                    print(f"[S4API] 📊 Token Usage:")
                    print(f"[S4API]   - Prompt tokens: {usage.get('promptTokenCount', 0)}")
                    print(f"[S4API]   - Response tokens: {usage.get('candidatesTokenCount', 0)}")
                    print(f"[S4API]   - Total tokens: {usage.get('totalTokenCount', 0)}")

                # Report whether any inline image data came back (diagnostic only).
                if "candidates" in result and result["candidates"]:
                    candidate = result["candidates"][0]
                    if "content" in candidate and "parts" in candidate["content"]:
                        parts = candidate["content"]["parts"]
                        if any("inlineData" in part for part in parts):
                            print(f"[S4API] 🖼️  Image generated successfully!")
                        else:
                            print(f"[S4API] ⚠️  No image data in response")

                return result

            error_text = response.text
            print(f"[S4API] ❌ API Error {response.status_code}: {error_text}")
            raise Exception(f"API request failed with status {response.status_code}: {error_text}")

        except requests.exceptions.RequestException as e:
            print(f"[S4API] ❌ Network Error: {str(e)}")
            raise Exception(f"Network request failed: {str(e)}")
        except json.JSONDecodeError as e:
            print(f"[S4API] ❌ JSON Decode Error: {str(e)}")
            raise Exception(f"Failed to decode API response: {str(e)}")

    def extract_response_content(self, api_response: Dict[str, Any]) -> tuple[Optional[torch.Tensor], str]:
        """
        Pull the generated image tensor and accompanying text out of the API
        response. Always returns a usable image (gradient placeholder when the
        response has no image, red-tinted placeholder on processing errors)
        and a non-empty text string.
        """
        output_image = None
        output_text = ""

        print(f"[S4API] 🔍 Processing API response...")

        try:
            if "candidates" in api_response and api_response["candidates"]:
                candidate = api_response["candidates"][0]

                if "content" in candidate and "parts" in candidate["content"]:
                    parts = candidate["content"]["parts"]
                    print(f"[S4API] 📄 Found {len(parts)} response parts")

                    for i, part in enumerate(parts):
                        # Text parts are concatenated in order of appearance.
                        if "text" in part:
                            text_content = part["text"]
                            output_text += text_content
                            print(f"[S4API] 📝 Part {i+1}: Text content ({len(text_content)} chars)")

                        # Image parts arrive as base64 inlineData (camelCase field names).
                        if "inlineData" in part:
                            inline_data = part["inlineData"]
                            mime_type = inline_data.get("mimeType")
                            image_data = inline_data.get("data", "")

                            if mime_type in ["image/png", "image/jpeg"] and image_data:
                                print(f"[S4API] 🖼️ Part {i+1}: Image data found ({mime_type}, {len(image_data)} chars)")
                                print(f"[S4API] 🔄 Decoding image...")
                                output_image = self.decode_base64_to_image(image_data)
                                if output_image is not None:
                                    height, width = output_image.shape[1], output_image.shape[2]
                                    print(f"[S4API] ✅ Image decoded successfully: {width}x{height}")

                # Candidate carrying only a finishReason and no content at all.
                elif "finishReason" in candidate and candidate["finishReason"] == "STOP":
                    output_text = "Request completed successfully, but no content was generated."
                    print(f"[S4API] ⚠️ Response finished but no content found")

            # No image returned: render a visual placeholder instead.
            if output_image is None:
                print(f"[S4API] ⚠️ No image found in response, creating placeholder")
                output_image = self.create_text_image(output_text if output_text else "No image generated")

            # Guarantee a non-empty text output for downstream nodes.
            if not output_text:
                output_text = "Image generated successfully (no text description provided)"

        except Exception as e:
            print(f"[S4API] ❌ Error processing response: {str(e)}")
            # Red-tinted placeholder signals an error state downstream.
            placeholder = torch.ones((1, 512, 512, 3)) * 0.3
            placeholder[:, :, :, 0] = 0.8
            output_image = placeholder
            output_text = f"Error processing response: {str(e)}"

        return output_image, output_text

    def generate_image(
        self,
        prompt: str,
        api_key: str,
        seed: int = 42,
        images: Optional[torch.Tensor] = None
    ) -> tuple[torch.Tensor, str]:
        """
        ComfyUI entry point: validate, check the cache, call the API, and cache
        the result. Never raises — failures return a red placeholder image plus
        an error message string.

        NOTE(review): the seed participates only in the cache key; the Gemini
        endpoint used here takes no seed parameter, so identical prompts with
        different seeds trigger fresh (non-deterministic) generations.
        """
        try:
            self.validate_inputs(prompt, api_key)

            cache_key = self.generate_cache_key(prompt, seed, images)

            # Serve a previously generated result when available.
            cached_result = self.load_from_cache(cache_key)
            if cached_result:
                print(f"[S4API] 💾 Using cached result for seed {seed}")
                return cached_result

            print(f"[S4API] 🎯 Generating new image with seed {seed}")
            print(f"[S4API] 🔑 Cache key: {cache_key[:8]}...")

            payload = self.prepare_request_payload(prompt, images)
            api_response = self.call_gemini_api(api_key, payload)
            output_image, output_text = self.extract_response_content(api_response)

            # Append token usage / model info to the text output.
            if "usageMetadata" in api_response:
                usage = api_response["usageMetadata"]
                usage_info = f"\n\n[Token Usage] Prompt: {usage.get('promptTokenCount', 0)}, Response: {usage.get('candidatesTokenCount', 0)}, Total: {usage.get('totalTokenCount', 0)}"
                usage_info += f"\n[Model] {api_response.get('modelVersion', self.MODEL)}"
                output_text += usage_info

            self.save_to_cache(cache_key, output_image, output_text)
            print(f"[S4API] 💾 Result saved to cache")

            return (output_image, output_text)

        except Exception as e:
            error_text = f"Error generating image: {str(e)}"
            print(f"[S4API] ❌ {error_text}")
            import traceback
            traceback.print_exc()

            # Red-tinted error placeholder instead of a black frame.
            placeholder_image = torch.ones((1, 512, 512, 3)) * 0.2
            placeholder_image[:, :, :, 0] = 0.9
            return (placeholder_image, error_text)
