"""
S4 Custom Image with Grok Node
Author: S4MUEL
GitHub: https://github.com/S4MUEL-404/ComfyUI-S4API

Grok image generation node with custom API key input.
"""

import json
import base64
import requests
from io import BytesIO
from typing import Optional
import torch
from PIL import Image
import numpy as np
import os
import hashlib


class S4ImageWithGrok:
    """
    ComfyUI node: generates images via the x.ai Grok image API.

    If reference images are connected, they are first described by the
    Grok Vision chat model ("grok-2-vision-1212") and that description is
    used as the generation prompt. Both the vision analysis and the final
    generated images are cached on disk (keyed by an MD5 of the request
    parameters) so identical requests are served from cache.
    """

    # Fixed API configuration
    API_URL = "https://api.x.ai/v1/images/generations"
    API_MODEL = "grok-2-image-1212"

    # Grok rejects over-long prompts (~1024 chars); truncate to this safe length.
    PROMPT_LIMIT = 1000

    def __init__(self):
        # Cache directory lives one level above this module: <package>/cache
        self.cache_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "cache")
        os.makedirs(self.cache_dir, exist_ok=True)

    @classmethod
    def INPUT_TYPES(cls):
        """ComfyUI input schema: required prompt/api_key/seed, optional reference images."""
        return {
            "required": {
                "prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Text prompt for image generation with Grok",
                        "placeholder": "Enter your image generation prompt here..."
                    },
                ),
                "api_key": (
                    "STRING",
                    {
                        "multiline": False,
                        "default": "",
                        "tooltip": "Your Grok API Key",
                        "placeholder": "xai-..."
                    }
                ),
                "seed": (
                    "INT",
                    {
                        "default": 0,
                        "min": 0,
                        "max": 2147483647,
                        "step": 1,
                        "display": "number",
                        "control_after_generate": True,
                        "tooltip": "Random seed for reproducible results (32-bit limit for Grok API)",
                    },
                ),
            },
            "optional": {
                "images": (
                    "IMAGE",
                    {
                        "default": None,
                        "tooltip": "Optional reference images for context"
                    }
                ),
            }
        }

    RETURN_TYPES = ("IMAGE", "STRING")
    RETURN_NAMES = ("image", "text")
    FUNCTION = "generate_image"
    CATEGORY = "💀PromptsO"
    DESCRIPTION = "Generates images using Grok API with custom API key."

    def validate_inputs(self, prompt: str, api_key: str) -> None:
        """Raise ValueError if the prompt or the API key is missing or blank."""
        if not prompt or not prompt.strip():
            raise ValueError("Prompt cannot be empty")
        if not api_key or not api_key.strip():
            raise ValueError("API key cannot be empty")

    @staticmethod
    def _truncate_prompt(text: str, limit: int = 1000) -> str:
        """Truncate text to at most `limit` chars on a word boundary, appending '...'.

        Returns the text unchanged when it already fits.
        """
        if len(text) <= limit:
            return text
        return text[:limit].rsplit(' ', 1)[0] + "..."

    @staticmethod
    def _image_hash(images: torch.Tensor) -> str:
        """Cheap fingerprint of an image batch for cache keys.

        Combines tensor shape and element sum — fast but not collision-proof;
        adequate for best-effort caching.
        """
        return hashlib.md5(
            str(images.shape).encode() + str(images.sum().item()).encode()
        ).hexdigest()[:8]

    def encode_image_to_base64(self, image_tensor: torch.Tensor) -> str:
        """Encode an RGB image tensor (floats in [0, 1], channels-last) as a base64 PNG string."""
        # Drop the batch dimension if a [B, H, W, C] tensor was passed.
        if len(image_tensor.shape) == 4:
            image_tensor = image_tensor[0]

        # Expect channels-last RGB ([H, W, 3]); scale 0-1 floats to uint8.
        if image_tensor.shape[-1] == 3:
            # Clamp first so out-of-range values cannot wrap around on the uint8 cast.
            image_np = (image_tensor.cpu().numpy().clip(0.0, 1.0) * 255).astype(np.uint8)
            image_pil = Image.fromarray(image_np)
        else:
            raise ValueError("Unsupported image format")

        buffer = BytesIO()
        image_pil.save(buffer, format="PNG")
        return base64.b64encode(buffer.getvalue()).decode("utf-8")

    def generate_vision_cache_key(self, prompt: str, images: torch.Tensor) -> str:
        """Cache key for a vision analysis: prompt + vision model + image fingerprint."""
        cache_data = {
            "prompt": prompt.strip(),
            "vision_model": "grok-2-vision-1212",
            "images_hash": self._image_hash(images),
        }
        # sort_keys makes the key independent of dict insertion order.
        cache_str = json.dumps(cache_data, sort_keys=True)
        return hashlib.md5(cache_str.encode()).hexdigest()

    def generate_cache_key(self, final_prompt: str, seed: int, images: Optional[torch.Tensor] = None) -> str:
        """Cache key for a generation result, based on the FINAL prompt actually sent."""
        cache_data = {
            "prompt": final_prompt.strip(),  # final (possibly vision-enhanced) prompt
            "model": self.API_MODEL,
            "seed": seed,
        }
        # Fold reference images in so prompts enhanced from different images don't collide.
        if images is not None:
            cache_data["images_hash"] = self._image_hash(images)

        cache_str = json.dumps(cache_data, sort_keys=True)
        return hashlib.md5(cache_str.encode()).hexdigest()

    def load_vision_cache(self, vision_cache_key: str) -> Optional[str]:
        """Return a cached enhanced prompt, or None on miss or unreadable cache file."""
        cache_file = os.path.join(self.cache_dir, f"{vision_cache_key}_vision.json")
        if os.path.exists(cache_file):
            try:
                with open(cache_file, 'r', encoding='utf-8') as f:
                    cache_data = json.load(f)
                    return cache_data.get("enhanced_prompt")
            except Exception as e:
                # Cache is best-effort: a corrupt file just means a cache miss.
                print(f"⚠️ Failed to load vision cache: {e}")
        return None

    def save_vision_cache(self, vision_cache_key: str, enhanced_prompt: str) -> None:
        """Persist a vision-enhanced prompt to disk (best-effort; failures only warn)."""
        try:
            cache_file = os.path.join(self.cache_dir, f"{vision_cache_key}_vision.json")
            cache_data = {
                "enhanced_prompt": enhanced_prompt,
                # Placeholder marker; nothing reads this field back.
                "timestamp": json.dumps({"time": "cached"})
            }
            with open(cache_file, 'w', encoding='utf-8') as f:
                json.dump(cache_data, f, ensure_ascii=False, indent=2)
        except Exception as e:
            print(f"⚠️ Failed to save vision cache: {e}")

    def load_from_cache(self, cache_key: str) -> Optional[tuple]:
        """Return a cached (image_tensor, text) tuple, or None on miss/failure."""
        cache_file = os.path.join(self.cache_dir, f"{cache_key}_grok_img.json")
        if os.path.exists(cache_file):
            try:
                with open(cache_file, 'r', encoding='utf-8') as f:
                    cache_data = json.load(f)
                    # Image tensor is stored separately as a .pt file.
                    img_file = os.path.join(self.cache_dir, f"{cache_key}_grok_img.pt")
                    if os.path.exists(img_file):
                        # weights_only=True avoids arbitrary-code deserialization.
                        image_tensor = torch.load(img_file, weights_only=True)
                        return (image_tensor, cache_data.get("text", ""))
            except Exception as e:
                print(f"⚠️ Failed to load cache: {e}")
        return None

    def save_to_cache(self, cache_key: str, image_tensor: torch.Tensor, text: str) -> None:
        """Persist a generation result to disk (best-effort; failures only warn)."""
        try:
            # Metadata (response text) goes in a JSON sidecar.
            cache_file = os.path.join(self.cache_dir, f"{cache_key}_grok_img.json")
            cache_data = {
                "text": text,
                # Placeholder marker; nothing reads this field back.
                "timestamp": json.dumps({"time": "cached"})
            }
            with open(cache_file, 'w', encoding='utf-8') as f:
                json.dump(cache_data, f, ensure_ascii=False, indent=2)

            # Image tensor goes in a companion .pt file.
            img_file = os.path.join(self.cache_dir, f"{cache_key}_grok_img.pt")
            torch.save(image_tensor, img_file)
        except Exception as e:
            print(f"⚠️ Failed to save cache: {e}")

    def generate_image(
        self,
        prompt: str,
        api_key: str,
        seed: int = 0,
        images: Optional[torch.Tensor] = None,
    ):
        """
        Generate image(s) with the Grok API.

        Returns a (image_tensor, text) tuple: a [B, H, W, 3] float tensor in
        [0, 1] plus a newline-separated description of each generated image.
        Raises ValueError on invalid inputs and Exception on unrecoverable API
        errors; vision-enhancement failures fall back to the original prompt.
        """
        self.validate_inputs(prompt, api_key)

        # If reference images are provided, analyze them with Grok Vision (cached).
        enhanced_prompt = prompt
        if images is not None:
            vision_cache_key = self.generate_vision_cache_key(prompt, images)
            cached_enhanced_prompt = self.load_vision_cache(vision_cache_key)

            if cached_enhanced_prompt:
                print(f"💾 Using cached vision analysis")
                enhanced_prompt = cached_enhanced_prompt
            else:
                print(f"🖼️ Analyzing reference image(s) with Grok Vision...")

                # Only the first image of a batch is sent for analysis.
                if len(images.shape) == 4:
                    image_b64 = self.encode_image_to_base64(images[0])
                else:
                    image_b64 = self.encode_image_to_base64(images)

                # Ask the vision model for a compact prompt that fits Grok's length limit.
                vision_prompt = f"Look at this image and create a short, concise image generation prompt (under 800 characters) that captures the key visual style, colors, and elements from this reference image. Incorporate this instruction: {prompt}. Focus on the most important visual aspects only."

                vision_data = {
                    "model": "grok-2-vision-1212",
                    "messages": [
                        {
                            "role": "user",
                            "content": [
                                {"type": "text", "text": vision_prompt},
                                {
                                    "type": "image_url",
                                    "image_url": {"url": f"data:image/png;base64,{image_b64}"}
                                }
                            ]
                        }
                    ],
                    "max_tokens": 500,
                    "temperature": 0.7
                }

                try:
                    vision_headers = {
                        "Authorization": f"Bearer {api_key}",
                        "Content-Type": "application/json"
                    }

                    vision_response = requests.post(
                        "https://api.x.ai/v1/chat/completions",
                        headers=vision_headers,
                        json=vision_data,
                        timeout=60
                    )

                    if vision_response.status_code == 200:
                        vision_result = vision_response.json()
                        if "choices" in vision_result and vision_result["choices"]:
                            enhanced_prompt = vision_result["choices"][0]["message"]["content"]

                            # Keep the enhanced prompt inside Grok's length limit.
                            if len(enhanced_prompt) > self.PROMPT_LIMIT:
                                enhanced_prompt = self._truncate_prompt(enhanced_prompt, self.PROMPT_LIMIT)
                                print(f"✂️ Enhanced prompt truncated to fit length limit")

                            self.save_vision_cache(vision_cache_key, enhanced_prompt)
                            print(f"✨ Enhanced prompt from vision analysis ({len(enhanced_prompt)} chars)")
                        else:
                            print(f"⚠️ Vision analysis failed, using original prompt")
                    else:
                        print(f"⚠️ Vision API error: {vision_response.status_code}, using original prompt")
                except Exception as e:
                    # Vision enhancement is optional: fall back to the raw prompt on any failure.
                    print(f"⚠️ Vision analysis error: {e}, using original prompt")

        # Ensure the final prompt is within the length limit.
        final_prompt = enhanced_prompt
        if len(final_prompt) > self.PROMPT_LIMIT:
            final_prompt = self._truncate_prompt(final_prompt, self.PROMPT_LIMIT)
            print(f"✂️ Final prompt truncated to {len(final_prompt)} characters")

        # Cache key is derived from the FINAL prompt so enhanced prompts cache correctly.
        cache_key = self.generate_cache_key(final_prompt, seed, images)

        # Check cache for a previously generated result.
        cached_result = self.load_from_cache(cache_key)
        if cached_result:
            print(f"💾 Using cached image generation result for seed {seed}")
            return cached_result

        # Request payload for image generation.
        data = {
            "prompt": final_prompt,
            "n": 1,
            "model": self.API_MODEL
        }

        # seed == 0 means "unseeded" and is deliberately not sent to the API.
        if seed > 0:
            data["seed"] = seed

        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }

        print(f"🤖 Generating image with Grok...")
        print(f"   • Model: {self.API_MODEL}")
        print(f"   • Original prompt: {prompt[:50]}{'...' if len(prompt) > 50 else ''}")
        if images is not None:
            print(f"   • Reference images: {images.shape[0] if len(images.shape) == 4 else 1}")
            print(f"   • Enhanced prompt: {enhanced_prompt[:50]}{'...' if len(enhanced_prompt) > 50 else ''}")
        print(f"   • Final prompt length: {len(final_prompt)} characters")
        print(f"   • Seed: {seed}")
        print(f"   • Cache key: {cache_key[:8]}...")

        try:
            response = requests.post(
                self.API_URL,
                headers=headers,
                json=data,
                timeout=120
            )

            if response.status_code != 200:
                # Error body may not be JSON; don't let decoding mask the real HTTP error.
                try:
                    error_msg = response.json().get("error", response.text)
                except ValueError:
                    error_msg = response.text
                raise Exception(f"API request failed with status {response.status_code}: {error_msg}")

            result = response.json()

            if "data" not in result or not result["data"]:
                raise Exception("No images returned from API")

            # Download and convert every returned image.
            image_tensors = []
            response_texts = []

            for i, img_data in enumerate(result["data"]):
                img_url = img_data["url"]
                revised_prompt = img_data.get("revised_prompt", prompt)

                print(f"🔄 Downloading image {i+1}/{len(result['data'])}...")

                img_response = requests.get(img_url, timeout=60)
                img_response.raise_for_status()

                image_pil = Image.open(BytesIO(img_response.content))

                # Normalize to RGB so the tensor is always [H, W, 3].
                if image_pil.mode != "RGB":
                    image_pil = image_pil.convert("RGB")

                # ComfyUI IMAGE convention: float32 in [0, 1], batch-first.
                image_np = np.array(image_pil, dtype=np.float32) / 255.0
                image_tensor = torch.from_numpy(image_np).unsqueeze(0)
                image_tensors.append(image_tensor)

                response_texts.append(f"Image {i+1}: {revised_prompt}")

            final_image_tensor = torch.cat(image_tensors, dim=0)
            # BUGFIX: was "\\n" (a literal backslash-n); join with real newlines.
            final_text = "\n".join(response_texts)

            print(f"✅ Images generated successfully!")
            print(f"   • Generated {len(image_tensors)} image(s)")
            print(f"   • Image tensor shape: {final_image_tensor.shape}")

            self.save_to_cache(cache_key, final_image_tensor, final_text)

            return (final_image_tensor, final_text)

        except Exception as e:
            print(f"❌ Error generating image: {str(e)}")
            # Bare raise preserves the original traceback.
            raise