"""
S4 Custom OpenAI Text Node  
Author: S4MUEL
GitHub: https://github.com/S4MUEL-404/ComfyUI-S4API

OpenAI text generation node with custom API key input.
"""

import base64
import hashlib
import io
import json
import os
import time
from typing import Optional

import requests
import torch
from PIL import Image


class S4TextWithOpenAI:
    """
    ComfyUI node that generates text via the OpenAI chat-completions API.

    Results are cached on disk, keyed by an MD5 hash of every generation
    parameter (including a fingerprint of any reference images), so
    re-running a workflow with identical inputs does not hit the API again.
    """

    # Fixed API configuration
    API_URL = "https://api.openai.com/v1/chat/completions"

    def __init__(self):
        # Cache directory lives one level above this module: <package>/cache
        self.cache_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "cache")
        os.makedirs(self.cache_dir, exist_ok=True)

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the node's input sockets for the ComfyUI graph editor."""
        return {
            "required": {
                "system_prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "System instruction for the AI",
                    },
                ),
                "user_prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "User prompt for OpenAI",
                    },
                ),
                "api_key": (
                    "STRING",
                    {
                        "multiline": False,
                        "default": "",
                        "tooltip": "Your OpenAI API Key"
                    }
                ),
                "model": (
                    ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-3.5-turbo"],
                    {
                        "default": "gpt-4o-mini",
                        "tooltip": "Choose OpenAI model",
                    },
                ),
                "max_tokens": (
                    "INT",
                    {
                        "default": 2048,
                        "min": 1,
                        "max": 32768,
                        "step": 1,
                        "display": "number",
                        "tooltip": "Maximum number of tokens to generate",
                    },
                ),
                "temperature": (
                    "FLOAT",
                    {
                        "default": 0.7,
                        "min": 0.0,
                        "max": 2.0,
                        "step": 0.1,
                        "display": "number",
                        "tooltip": "Sampling temperature (0.0 = deterministic, 2.0 = very random)",
                    },
                ),
                "top_p": (
                    "FLOAT",
                    {
                        "default": 0.95,
                        "min": 0.0,
                        "max": 1.0,
                        "step": 0.05,
                        "display": "number",
                        "tooltip": "Top-p sampling threshold",
                    },
                ),
                "seed": (
                    "INT",
                    {
                        "default": 0,
                        "min": 0,
                        "max": 0xffffffffffffffff,
                        "step": 1,
                        "display": "number",
                        "control_after_generate": True,
                        "tooltip": "Random seed for reproducible results",
                    },
                ),
            },
            "optional": {
                "images": (
                    "IMAGE",
                    {
                        "default": None,
                        "tooltip": "Optional reference images for context"
                    }
                ),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("text",)
    FUNCTION = "generate_text"
    CATEGORY = "💀PromptsO"
    DESCRIPTION = "Generates text using OpenAI API with custom API key."

    def validate_inputs(self, user_prompt: str, api_key: str) -> None:
        """Raise ValueError if the prompt or API key is empty/whitespace."""
        if not user_prompt or not user_prompt.strip():
            raise ValueError("User prompt cannot be empty")
        if not api_key or not api_key.strip():
            raise ValueError("API key cannot be empty")

    def encode_image_to_base64(self, image_tensor) -> Optional[str]:
        """Encode an image tensor as a base64 JPEG string.

        Accepts an (H, W, C) tensor or an (N, H, W, C) batch (first image is
        used), with float values expected in [0, 1].  Returns None on failure.
        """
        try:
            # Batched input: encode only the first image
            if len(image_tensor.shape) == 4:
                img_tensor = image_tensor[0]
            else:
                img_tensor = image_tensor

            # Clamp guards against float overshoot wrapping around in uint8
            img_array = (img_tensor.clamp(0.0, 1.0).cpu().numpy() * 255).astype('uint8')
            pil_image = Image.fromarray(img_array)

            # JPEG cannot carry an alpha channel; flatten anything non-RGB
            if pil_image.mode != "RGB":
                pil_image = pil_image.convert("RGB")

            buffer = io.BytesIO()
            pil_image.save(buffer, format="JPEG", quality=95)
            return base64.b64encode(buffer.getvalue()).decode('utf-8')
        except Exception as e:
            print(f"⚠️ Error encoding image: {e}")
            return None

    def generate_cache_key(self, system_prompt: str, user_prompt: str, model: str,
                          max_tokens: int, temperature: float, top_p: float, seed: int, images=None) -> str:
        """Return an MD5 hex digest uniquely identifying this request."""
        cache_data = {
            "system_prompt": system_prompt.strip(),
            "user_prompt": user_prompt.strip(),
            "model": model,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "seed": seed
        }

        if images is not None:
            # Cheap fingerprint (shape + pixel sum) instead of hashing all
            # pixel data; collisions are possible but unlikely in practice.
            image_hash = hashlib.md5(str(images.shape).encode() + str(images.sum().item()).encode()).hexdigest()[:8]
            cache_data["images_hash"] = image_hash

        # sort_keys makes the serialization (and thus the key) deterministic
        cache_str = json.dumps(cache_data, sort_keys=True)
        return hashlib.md5(cache_str.encode()).hexdigest()

    def load_from_cache(self, cache_key: str) -> Optional[str]:
        """Return the cached result for cache_key, or None if absent/corrupt."""
        cache_file = os.path.join(self.cache_dir, f"{cache_key}.json")
        if os.path.exists(cache_file):
            try:
                with open(cache_file, 'r', encoding='utf-8') as f:
                    cache_data = json.load(f)
                    return cache_data.get("result")
            except Exception as e:
                # A corrupt cache entry is non-fatal; fall through to re-query
                print(f"⚠️ Failed to load cache: {e}")
        return None

    def save_to_cache(self, cache_key: str, result: str) -> None:
        """Persist result under cache_key; failures are logged, not raised."""
        cache_file = os.path.join(self.cache_dir, f"{cache_key}.json")
        try:
            cache_data = {
                "result": result,
                # Unix timestamp of when the entry was written
                "timestamp": time.time(),
            }
            with open(cache_file, 'w', encoding='utf-8') as f:
                json.dump(cache_data, f, ensure_ascii=False, indent=2)
        except Exception as e:
            print(f"⚠️ Failed to save cache: {e}")

    def generate_text(
        self,
        system_prompt,
        user_prompt,
        api_key,
        model="gpt-4o-mini",
        max_tokens=2048,
        temperature=0.7,
        top_p=0.95,
        seed=0,
        images=None,
    ):
        """Generate text via the OpenAI API, serving from cache when possible.

        Returns a 1-tuple (generated_text,) as required by ComfyUI.
        Raises ValueError on bad inputs and Exception on API failures.
        """
        self.validate_inputs(user_prompt, api_key)

        cache_key = self.generate_cache_key(
            system_prompt, user_prompt, model, max_tokens, temperature, top_p, seed, images
        )

        # "is not None" so a cached empty response still counts as a hit
        cached_result = self.load_from_cache(cache_key)
        if cached_result is not None:
            print(f"💾 Using cached result for seed {seed}")
            return (cached_result,)

        # Prepare messages
        messages = []

        if system_prompt and system_prompt.strip():
            messages.append({
                "role": "system",
                "content": system_prompt.strip()
            })

        # Prepare user message content
        if images is not None:
            print(f"🖼️ Adding reference image(s) for context")
            # OpenAI Vision API format: text part followed by image parts
            user_content = [
                {"type": "text", "text": user_prompt}
            ]

            if len(images.shape) == 4:
                # Batch of images; API payload size caps us at 4
                image_tensors = [images[i] for i in range(min(images.shape[0], 4))]
            else:
                image_tensors = [images]

            for image_tensor in image_tensors:
                image_b64 = self.encode_image_to_base64(image_tensor)
                # Skip images that failed to encode rather than sending "None"
                if image_b64 is not None:
                    user_content.append({
                        "type": "image_url",
                        # Encoder emits JPEG, so declare a matching MIME type
                        "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}
                    })

            messages.append({
                "role": "user",
                "content": user_content
            })
        else:
            # Text-only message
            messages.append({
                "role": "user",
                "content": user_prompt
            })

        # Prepare request data
        data = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "stream": False
        }

        # seed=0 means "no seed"; only positive seeds are forwarded
        if seed > 0:
            data["seed"] = seed

        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }

        print(f"🤖 Generating text with OpenAI...")
        print(f"   • Model: {model}")
        print(f"   • User prompt: {user_prompt[:50]}{'...' if len(user_prompt) > 50 else ''}")
        if system_prompt.strip():
            print(f"   • System prompt: {system_prompt[:30]}{'...' if len(system_prompt) > 30 else ''}")
        if images is not None:
            print(f"   • Reference images: {images.shape[0] if len(images.shape) == 4 else 1}")
        print(f"   • Max tokens: {max_tokens}")
        print(f"   • Temperature: {temperature}")
        print(f"   • Top-p: {top_p}")
        print(f"   • Seed: {seed}")
        print(f"   • Cache key: {cache_key[:8]}...")

        try:
            response = requests.post(
                self.API_URL,
                headers=headers,
                json=data,
                timeout=120
            )

            if response.status_code != 200:
                # Error bodies are not guaranteed to be JSON (e.g. HTML from
                # a gateway); fall back to the raw text in that case
                try:
                    error_msg = response.json().get("error", {}).get("message", response.text)
                except ValueError:
                    error_msg = response.text
                raise Exception(f"API request failed with status {response.status_code}: {error_msg}")

            # Parse response
            result = response.json()

            if "choices" not in result or not result["choices"]:
                raise Exception("No response generated from API")

            # Extract generated text
            generated_text = result["choices"][0]["message"]["content"]

            # Log usage information
            if "usage" in result:
                usage = result["usage"]
                print(f"📊 Token usage:")
                print(f"   • Prompt tokens: {usage.get('prompt_tokens', 0)}")
                print(f"   • Completion tokens: {usage.get('completion_tokens', 0)}")
                print(f"   • Total tokens: {usage.get('total_tokens', 0)}")

            print(f"✅ Text generated successfully!")
            print(f"   • Response length: {len(generated_text)} characters")

            # Save to cache
            self.save_to_cache(cache_key, generated_text)

            return (generated_text,)

        except Exception as e:
            print(f"❌ Error generating text: {str(e)}")
            # Bare raise preserves the original traceback
            raise