import os
import base64
import io
import json
import torch
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import requests
import tempfile
from io import BytesIO
from google import genai
from google.genai import types
import time
import traceback
import asyncio
import concurrent.futures
import random
from typing import List, Tuple, Optional


class GeminiImageEditor:
    @classmethod
    def INPUT_TYPES(cls):
        """Declare the ComfyUI input sockets for this node.

        Socket order matters: ComfyUI renders widgets in dict order, so the
        optional group is built as seed, image1..image4, api_version.
        """
        required = {
            "prompt": ("STRING", {"multiline": True}),
            "api_key": ("STRING", {"default": "", "multiline": False}),
            "model": ("STRING", {
                "default": "models/gemini-2.0-flash-preview-image-generation",
                "multiline": False,
                "placeholder": "e.g., gemini-2.5-flash, models/gemini-2.0-flash-exp, imagen-3.0-generate-001"
            }),
            "temperature": ("FLOAT", {"default": 1, "min": 0.0, "max": 2.0, "step": 0.05}),
            "max_retries": ("INT", {"default": 3, "min": 1, "max": 5, "step": 1}),
            "batch_size": ("INT", {"default": 1, "min": 1, "max": 8, "step": 1}),
        }

        optional = {"seed": ("INT", {"default": 66666666, "min": 0, "max": 66666666})}
        # Up to four optional reference images can be wired in.
        for idx in range(1, 5):
            optional[f"image{idx}"] = ("IMAGE",)
        optional["api_version"] = (["auto", "v1", "v1beta", "v1alpha"], {"default": "auto"})

        return {"required": required, "optional": optional}

    # ComfyUI node contract: output socket types/names, the method ComfyUI
    # invokes, and the menu category the node appears under.
    # NOTE(review): "API Respond" carries the combined log/response text built in
    # generate_image; "api_request"/"api_response" carry the serialized JSON.
    RETURN_TYPES = ("IMAGE", "STRING", "STRING", "STRING", "STRING")
    RETURN_NAMES = ("image", "caption", "API Respond", "api_request", "api_response")
    FUNCTION = "generate_image"
    CATEGORY = "🤖 Gemini"

    def __init__(self):
        """Set up per-instance log/request/response buffers and sanity-check the SDK."""
        self.log_messages = []   # human-readable log lines (raw, no timestamps)
        self.api_requests = []   # per-batch serialized API requests
        self.api_responses = []  # per-batch serialized API responses

        # Best-effort version check for the installed google-genai SDK; any
        # failure (package missing, no metadata, no packaging lib) is only logged.
        try:
            import importlib.metadata
            installed = importlib.metadata.version('google-genai')
            self._log(f"Current google-genai version: {installed}")

            from packaging import version
            if version.parse(installed) < version.parse('0.8.0'):
                self._log("Warning: google-genai version is too low, recommend upgrading to the latest version")
                self._log("Suggested: pip install -q -U google-genai")
        except Exception as e:
            self._log(f"Unable to check google-genai version: {e}")

    def _log(self, message):
        """Print *message* with a timestamp prefix, store it, and return it.

        Only the raw (un-timestamped) text is appended to self.log_messages,
        which later feeds the node's text output.
        """
        stamp = time.strftime("%Y-%m-%d %H:%M:%S")
        print(f"[GeminiImageGenerator] {stamp}: {message}")
        # Guard with hasattr so logging is safe even before __init__ ran.
        if hasattr(self, 'log_messages'):
            self.log_messages.append(message)
        return message


    def _create_error_image(self, error_message="API Failed to return an image", width=1024, height=1024):
        """Render *error_message* in red on a black canvas; return a [1, H, W, 3] float tensor."""
        canvas = Image.new('RGB', (width, height), color=(0, 0, 0))
        drawer = ImageDraw.Draw(canvas)

        # Pick the first TrueType font that loads; fall back to PIL's bitmap font.
        font = None
        try:
            for candidate in ('Arial.ttf', 'DejaVuSans.ttf', 'FreeSans.ttf', 'NotoSans-Regular.ttf'):
                try:
                    font = ImageFont.truetype(candidate, 24)
                    break
                except IOError:
                    continue
            if font is None:
                font = ImageFont.load_default()
        except Exception:
            font = ImageFont.load_default()

        # Center horizontally; textlength() is the modern Pillow API, getsize()
        # the legacy fallback for older Pillow builds.
        if hasattr(drawer, 'textlength'):
            text_width = drawer.textlength(error_message, font=font)
        else:
            text_width = font.getsize(error_message)[0]
        position = ((width - text_width) / 2, height / 2 - 12)
        drawer.text(position, error_message, fill=(255, 0, 0), font=font)

        # ComfyUI expects float32 pixels in [0, 1] with a leading batch dim.
        tensor = torch.from_numpy(np.array(canvas).astype(np.float32) / 255.0).unsqueeze(0)

        self._log(f"Created error image with message: '{error_message}'")
        return tensor

    def _generate_empty_image(self, width=1024, height=1024):
        """Return a placeholder tensor in ComfyUI's [B, H, W, C] layout.

        Delegates to the error-image renderer with its default message.
        """
        return self._create_error_image("API Failed to return an image", width, height)

    def _process_tensor_to_pil(self, tensor, name="Image"):
        """Convert a tensor to a PIL image for API submission"""
        try:
            if tensor is None:
                self._log(f"{name} is None, skipping")
                return None

            # Ensure tensor is in correct format [1, H, W, 3]
            if len(tensor.shape) == 4 and tensor.shape[0] == 1:
                # Get first frame image
                image_np = tensor[0].cpu().numpy()

                # Convert to uint8 format for PIL
                image_np = (image_np * 255).astype(np.uint8)

                # Create PIL image
                pil_image = Image.fromarray(image_np)

                self._log(f"{name} processed successfully, size: {pil_image.width}x{pil_image.height}")
                return pil_image
            else:
                self._log(f"{name} format incorrect: {tensor.shape}")
                return None
        except Exception as e:
            self._log(f"Error processing {name}: {str(e)}")
            return None

    def _call_gemini_api(self, client, model, contents, gen_config, retry_count=0, max_retries=3, batch_id=0):
        """Call Gemini's generate_content with structural validation and retries.

        Retries (up to *max_retries* total attempts) both on exceptions
        (progressive backoff: 2s, 4s, 6s...) and on structurally invalid
        responses — missing candidates/content/parts — (fixed 2s wait).
        Each attempt's request summary is stored in self.api_requests[batch_id].
        Returns the response object, or None when all attempts fail.
        *retry_count* sets the starting attempt index (kept for compatibility).
        """
        def _invalid_reason(resp):
            # Describe what is structurally missing, or None when usable.
            if not hasattr(resp, 'candidates') or not resp.candidates:
                return "Empty response: No candidates found"
            first = resp.candidates[0]
            if not hasattr(first, 'content') or first.content is None:
                return "Invalid response: candidates[0].content is missing"
            if not hasattr(first.content, 'parts') or first.content.parts is None:
                return "Invalid response: candidates[0].content.parts is missing"
            return None

        attempt = retry_count
        while True:
            try:
                self._log(f"[Batch {batch_id}] API call attempt #{attempt + 1}")

                # Serialize the request for the node's "api_request" output.
                # BUGFIX: the old `hasattr(c, '_pb')` test never matched PIL
                # images, so their full repr was logged; use isinstance instead.
                request_data = {
                    "model": model,
                    "contents": contents if isinstance(contents, str) else [
                        "PIL Image" if isinstance(c, Image.Image) else str(c) for c in contents
                    ],
                    "config": {
                        "temperature": getattr(gen_config, 'temperature', None),
                        "seed": getattr(gen_config, 'seed', None),
                        "response_modalities": getattr(gen_config, 'response_modalities', None),
                    }
                }
                if hasattr(self, 'api_requests'):
                    # Grow the list so batch_id is a valid index, then record.
                    while len(self.api_requests) <= batch_id:
                        self.api_requests.append({})
                    self.api_requests[batch_id] = request_data

                response = client.models.generate_content(
                    model=model,
                    contents=contents,
                    config=gen_config
                )

                problem = _invalid_reason(response)
                if problem is None:
                    self._log(f"[Batch {batch_id}] Valid API response received")
                    return response

                self._log(f"[Batch {batch_id}] {problem}")
                if attempt >= max_retries - 1:
                    self._log(f"[Batch {batch_id}] Maximum retries ({max_retries}) reached. Returning empty response.")
                    return None
                self._log(f"[Batch {batch_id}] Retrying in 2 seconds... (Attempt {attempt + 1}/{max_retries})")
                time.sleep(2)
                attempt += 1

            except Exception as e:
                self._log(f"[Batch {batch_id}] API call error: {str(e)}")
                if attempt >= max_retries - 1:
                    self._log(f"[Batch {batch_id}] Maximum retries ({max_retries}) reached. Giving up.")
                    return None
                wait_time = 2 * (attempt + 1)  # Progressive backoff: 2s, 4s, 6s...
                self._log(
                    f"[Batch {batch_id}] Retrying in {wait_time} seconds... (Attempt {attempt + 1}/{max_retries})")
                time.sleep(wait_time)
                attempt += 1

    def _process_api_response(self, response, batch_id=0):
        """Extract the first decodable image (and any text) from a Gemini response.

        Returns (image_tensor, text): the tensor is [1, H, W, 3] float32 —
        either the decoded image, or a rendered error image when extraction
        fails. A JSON-serializable summary of the response (image bytes
        replaced by a placeholder) is stored in self.api_responses[batch_id].
        """
        def _store(payload):
            # Grow self.api_responses so batch_id is a valid index, then record.
            if hasattr(self, 'api_responses'):
                while len(self.api_responses) <= batch_id:
                    self.api_responses.append({})
                self.api_responses[batch_id] = payload

        if response is None:
            self._log(f"[Batch {batch_id}] No valid response to process")
            error_msg = "API Failed to return an image"
            _store({"error": error_msg})
            return self._create_error_image(error_msg), error_msg

        response_text = ""
        response_data = {"candidates": []}

        if not hasattr(response, 'candidates') or not response.candidates:
            self._log(f"[Batch {batch_id}] No candidates in API response")
            error_msg = "API returned an empty response"
            _store({"error": error_msg})
            return self._create_error_image(error_msg), error_msg

        # Summarize every candidate (finish reason only; parts are filled below).
        for candidate in response.candidates:
            response_data["candidates"].append({
                "finish_reason": str(candidate.finish_reason) if hasattr(candidate, 'finish_reason') else None,
                "content": {"parts": []}
            })

        # Walk the parts of the first candidate; return on the first decodable image.
        for part in response.candidates[0].content.parts:
            if hasattr(part, 'text') and part.text is not None:
                text_content = part.text
                response_text += text_content
                response_data["candidates"][0]["content"]["parts"].append({"text": text_content})
                # BUGFIX: the old ternary dropped the "[Batch N] API returned
                # text:" prefix for texts of 100 chars or fewer.
                preview = text_content[:100] + "..." if len(text_content) > 100 else text_content
                self._log(f"[Batch {batch_id}] API returned text: {preview}")

            elif hasattr(part, 'inline_data') and part.inline_data is not None:
                mime = part.inline_data.mime_type if hasattr(part.inline_data, 'mime_type') else "image/png"
                # Record a placeholder instead of the raw base64 payload.
                response_data["candidates"][0]["content"]["parts"].append(
                    {"inline_data": {"mime_type": mime, "data": "<base64_image_data>"}})
                self._log(f"[Batch {batch_id}] API returned image data")
                try:
                    image_data = part.inline_data.data

                    # Guard against empty/placeholder payloads.
                    if not image_data or len(image_data) < 100:
                        self._log(f"[Batch {batch_id}] Warning: Image data is empty or too small")
                        continue

                    pil_image = None

                    # Method 1: decode directly from memory.
                    try:
                        pil_image = Image.open(BytesIO(image_data))
                        self._log(
                            f"[Batch {batch_id}] Direct PIL open successful, size: {pil_image.width}x{pil_image.height}")
                    except Exception as e1:
                        self._log(f"[Batch {batch_id}] Direct PIL open failed: {str(e1)}")

                        # Method 2: round-trip through a temp file.
                        try:
                            temp_file = os.path.join(tempfile.gettempdir(),
                                                     f"gemini_image_{batch_id}_{int(time.time())}.png")
                            with open(temp_file, "wb") as f:
                                f.write(image_data)

                            pil_image = Image.open(temp_file)
                            self._log(f"[Batch {batch_id}] Opening via temp file successful")
                        except Exception as e2:
                            self._log(f"[Batch {batch_id}] Opening via temp file failed: {str(e2)}")

                    if pil_image is None:
                        self._log(f"[Batch {batch_id}] Cannot open image, skipping")
                        continue

                    if pil_image.mode != 'RGB':
                        pil_image = pil_image.convert('RGB')
                        self._log(f"[Batch {batch_id}] Image converted to RGB mode")

                    width, height = pil_image.size
                    self._log(f"[Batch {batch_id}] Image size: {width}x{height}")

                    # Convert to ComfyUI's [1, H, W, 3] float32 layout.
                    img_array = np.array(pil_image).astype(np.float32) / 255.0
                    img_tensor = torch.from_numpy(img_array).unsqueeze(0)

                    self._log(f"[Batch {batch_id}] Image converted to tensor successfully, shape: {img_tensor.shape}")
                    _store(response_data)
                    return img_tensor, response_text
                except Exception as e:
                    self._log(f"[Batch {batch_id}] Image processing error: {e}")
                    traceback.print_exc()

        # No decodable image part was found.
        self._log(f"[Batch {batch_id}] No image data found in API response")
        error_msg = "API Failed to return an image"
        _store(response_data)
        return self._create_error_image(error_msg), response_text if response_text else error_msg

    def _generate_single_image_sync(self, prompt, api_key, model, temperature, max_retries,
                                    batch_id, seed, reference_images, api_version=None):
        """Run one full prompt-to-image round trip for a single batch slot.

        Returns (image_tensor, text, batch_id). Failures produce an error
        image plus a message instead of raising, so parallel callers never see
        an exception escape.
        """
        try:
            # Normalize the API-version hint; the SDK selects the version itself,
            # so anything other than "auto" only produces an informational log.
            if api_version not in ("auto", "v1", "v1beta", "v1alpha"):
                api_version = "auto"
            if api_version != "auto":
                self._log(f"[Batch {batch_id}] Note: Using google-genai SDK which automatically handles API versions. Requested version: {api_version}")

            # Each batch slot gets its own client instance.
            client = genai.Client(api_key=api_key)

            # Seed 0 means "pick a random seed for this slot".
            actual_seed = random.randint(1, 2 ** 31 - 1) if seed == 0 else seed
            self._log(f"[Batch {batch_id}] Using seed: {actual_seed}")

            gen_config = types.GenerateContentConfig(
                temperature=temperature,
                seed=actual_seed,
                response_modalities=['Text', 'Image']
            )

            # Prompt first, then any reference images.
            content_parts = [f"Create a detailed image of: {prompt}"]
            content_parts.extend(img for img in reference_images if img is not None)

            response = self._call_gemini_api(
                client=client,
                model=model,
                contents=content_parts,
                gen_config=gen_config,
                max_retries=max_retries,
                batch_id=batch_id
            )

            img_tensor, response_text = self._process_api_response(response, batch_id)

            # Defensive: fall back to an error image if processing yielded nothing.
            if img_tensor is None:
                error_msg = f"Batch {batch_id}: API Failed to return an image"
                return self._create_error_image(error_msg), error_msg, batch_id

            return img_tensor, response_text, batch_id

        except Exception as e:
            self._log(f"[Batch {batch_id}] Error in async image generation: {str(e)}")
            error_msg = f"Batch {batch_id}: Error: {str(e)}"
            return self._create_error_image(error_msg), error_msg, batch_id

    def generate_image(self, prompt, api_key, model, temperature, max_retries=3, batch_size=1,
                       seed=66666666, image1=None, image2=None, image3=None, image4=None, api_version=None):
        """Generate a batch of images via parallel Gemini API calls.

        Returns (image_batch, captions, log_text, api_requests_json,
        api_responses_json). Each batch slot runs in its own thread with its
        own client; slot i uses seed + i, or a fresh random seed per slot when
        seed == 0. Errors never raise — they yield rendered error images.
        """
        # BUGFIX: reset ALL per-run state. Previously api_requests/api_responses
        # were never cleared, so output JSON mixed in data from earlier runs.
        self.log_messages = []
        self.api_requests = []
        self.api_responses = []
        all_response_text = ""

        # Normalize optional inputs that may arrive as None/invalid from the UI.
        if api_version is None or api_version not in ["auto", "v1", "v1beta", "v1alpha"]:
            api_version = "auto"
        try:
            seed = int(seed) if seed is not None else 66666666
        except (ValueError, TypeError):
            seed = 66666666  # Use default seed

        try:
            if not api_key:
                error_message = "Error: No API key provided. Please enter Google API key in the node."
                self._log(error_message)
                error_img = self._create_error_image("API key required")
                full_text = "## Error\n" + error_message + "\n\n## Instructions\n1. Enter your Google API key in the node"

                # Still honor the requested batch size in the error output.
                if batch_size > 1:
                    batch_tensor = torch.cat([error_img] * batch_size, dim=0)
                    return (batch_tensor, "", full_text, "{}", "{}")
                return (error_img, "", full_text, "{}", "{}")

            self._log(f"Starting batch generation of {batch_size} images")

            # Convert the (up to four) reference tensors to PIL once; shared by all slots.
            reference_pil_images = []
            for i, img_tensor in enumerate([image1, image2, image3, image4]):
                if img_tensor is not None:
                    pil_img = self._process_tensor_to_pil(img_tensor, f"Reference Image {i + 1}")
                    if pil_img:
                        reference_pil_images.append(pil_img)
                        self._log(f"Added reference image {i + 1} to batch processing")

            # Fan the batch out over a thread pool (concurrent.futures is
            # imported at module level; the old redundant local import is gone).
            results = []
            with concurrent.futures.ThreadPoolExecutor(max_workers=min(batch_size, 10)) as executor:
                futures = []
                for i in range(batch_size):
                    # Non-zero seed: deterministic per-slot seeds; zero: random per slot.
                    batch_seed = seed + i if seed != 0 else 0
                    futures.append(executor.submit(
                        self._generate_single_image_sync,
                        prompt=prompt,
                        api_key=api_key,
                        model=model,
                        temperature=temperature,
                        max_retries=max_retries,
                        batch_id=i + 1,
                        seed=batch_seed,
                        reference_images=reference_pil_images,
                        api_version=api_version
                    ))

                self._log(f"Executing {len(futures)} batch items in parallel using thread pool")

                for future in concurrent.futures.as_completed(futures):
                    try:
                        results.append(future.result())
                    except Exception as e:
                        self._log(f"Batch item failed with error: {str(e)}")
                        error_img = self._create_error_image(f"Batch error: {str(e)}")
                        results.append((error_img, str(e), 0))

            if not results:
                self._log("Batch processing did not yield results")
                batch_tensor = torch.cat([self._create_error_image("Batch processing failed")] * batch_size, dim=0)
                return (batch_tensor, "", "Batch processing failed", "{}", "{}")

            # BUGFIX: as_completed yields in completion order, which is
            # nondeterministic; sort by batch_id so the output image order
            # always matches the slot (and therefore seed) order.
            results.sort(key=lambda r: r[2])

            all_tensors = []
            batch_texts = []
            all_captions = []
            for img_tensor, text, batch_id in results:
                # Every result carries a valid tensor (real image or error image).
                all_tensors.append(img_tensor)
                batch_texts.append(f"## Batch {batch_id} Response\n{text}")
                all_captions.append(text)

            # NOTE(review): torch.cat requires all slots to produce same-sized
            # images — this matches the original behavior; confirm the API
            # returns a consistent resolution per request.
            batch_tensor = all_tensors[0] if len(all_tensors) == 1 else torch.cat(all_tensors, dim=0)
            self._log(f"Successfully created batch of {len(all_tensors)} images, final shape: {batch_tensor.shape}")

            all_response_text = "## Batch Processing Results\n" + "\n".join(self.log_messages) + "\n\n" + "\n\n".join(
                batch_texts)
            combined_captions = "\n\n".join(all_captions) if all_captions else ""

            return (batch_tensor, combined_captions, all_response_text,
                    json.dumps(self.api_requests, indent=2) if self.api_requests else "{}",
                    json.dumps(self.api_responses, indent=2) if self.api_responses else "{}")

        except Exception as e:
            error_message = f"Error during batch processing: {str(e)}"
            self._log(error_message)
            traceback.print_exc()

            batch_tensor = torch.cat([self._create_error_image(f"Error: {str(e)}")] * batch_size, dim=0)
            full_text = "## Processing Log\n" + "\n".join(self.log_messages) + "\n\n## Error\n" + error_message
            return (batch_tensor, "", full_text,
                    json.dumps(self.api_requests, indent=2) if self.api_requests else "{}",
                    json.dumps(self.api_responses, indent=2) if self.api_responses else "{}")