import base64
import json
import os
import random
import re
import wave
from dataclasses import dataclass
from enum import Enum
from io import BytesIO
from typing import Dict, List, Optional, Tuple, Union

import numpy as np
import requests
import torch
from groq import Groq, GroqError
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageOps

# Constants
# API key is resolved once at import time; nodes fall back to the live
# environment variable again at call time if this is empty.
DEFAULT_API_KEY = os.getenv('GROQ_API_KEY', '')
MAX_TOKENS = 8192  # NOTE(review): unused in this module — possibly kept for external callers; confirm before removing
DEFAULT_TEMPERATURE = 0.7
DEFAULT_TOP_P = 0.9
DEFAULT_MAX_TOKENS = 1024

class ModelType(Enum):
    """Capability category of a GROQ model; used to filter AVAILABLE_MODELS."""
    TEXT = "text"
    VISION = "vision"
    AUDIO = "audio"
    EMBEDDING = "embedding"
    CODE = "code"

@dataclass
class ModelInfo:
    """Static metadata describing one GROQ model entry."""
    id: str            # model identifier passed verbatim to the GROQ API
    name: str          # human-readable display name
    type: ModelType    # capability category (see ModelType)
    description: str   # short blurb, surfaced in node tooltips
    max_tokens: int = 4096            # token limit advertised for this model
    supports_images: bool = False     # True for vision-language models
    supports_audio: bool = False      # True for speech models
    supports_functions: bool = False  # True if tool/function calling is available

# Catalog of GROQ models exposed by the nodes, grouped by capability.
AVAILABLE_MODELS = [
    # --- Text generation ---
    ModelInfo(
        id="llama3-70b-8192",
        name="LLaMA 3 70B",
        type=ModelType.TEXT,
        description="Meta's 70B parameter model, great for complex tasks",
        max_tokens=8192,
        supports_functions=True,
    ),
    ModelInfo(
        id="llama3-8b-8192",
        name="LLaMA 3 8B",
        type=ModelType.TEXT,
        description="Faster 8B parameter model for quick responses",
        max_tokens=8192,
    ),
    ModelInfo(
        id="mixtral-8x7b-32768",
        name="Mixtral 8x7B",
        type=ModelType.TEXT,
        description="Mixture of Experts model with 8 experts and 32768 context",
        max_tokens=32768,
    ),

    # --- Vision-language ---
    ModelInfo(
        id="llava-v1.6-34b-4096-preview",
        name="LLaVA 1.6 34B",
        type=ModelType.VISION,
        description="Advanced vision-language model with 34B parameters",
        max_tokens=4096,
        supports_images=True,
    ),
    ModelInfo(
        id="llava-v1.5-7b-4096-preview",
        name="LLaVA 1.5 7B",
        type=ModelType.VISION,
        description="Faster vision-language model with 7B parameters",
        max_tokens=4096,
        supports_images=True,
    ),

    # --- Code generation ---
    ModelInfo(
        id="codeqwen-7b",
        name="CodeQwen 7B",
        type=ModelType.CODE,
        description="Specialized for code generation and understanding",
        max_tokens=8192,
    ),
]

# Helper functions
def get_model_info(model_id: str) -> Optional[ModelInfo]:
    """Return the ModelInfo whose id matches *model_id*, or None if unknown."""
    return next((entry for entry in AVAILABLE_MODELS if entry.id == model_id), None)

def get_model_choices(model_type: Optional[ModelType] = None):
    """Return model ids, restricted to *model_type* when one is given."""
    if model_type is None:
        candidates = AVAILABLE_MODELS
    else:
        candidates = (entry for entry in AVAILABLE_MODELS if entry.type == model_type)
    return [entry.id for entry in candidates]

def get_model_descriptions():
    """Map each model id to a human-readable "Name: description" string."""
    descriptions = {}
    for entry in AVAILABLE_MODELS:
        descriptions[entry.id] = f"{entry.name}: {entry.description}"
    return descriptions

# Image processing utilities
def process_image(image_tensor, crop_region=None, resize_dims=None, enhance=False):
    """Convert an image tensor to a PIL Image, with optional crop/resize/enhance.

    Args:
        image_tensor: float tensor with values in [0, 1], CHW or HWC layout;
            a leading batch dimension is handled by taking the first image.
        crop_region: optional (x, y, width, height) box to crop to.
        resize_dims: optional (width, height) target for LANCZOS resampling.
        enhance: when True, apply a mild contrast and sharpness boost.

    Returns:
        PIL.Image.Image
    """
    if image_tensor.dim() == 4:
        image_tensor = image_tensor[0]  # batch input: use the first image

    image_np = image_tensor.cpu().numpy()
    # Clamp before the uint8 cast so out-of-range floats don't wrap around.
    image_np = (np.clip(image_np, 0.0, 1.0) * 255).astype(np.uint8)

    if image_np.shape[0] == 3:  # CHW layout -> HWC for PIL
        image_np = image_np.transpose(1, 2, 0)

    image = Image.fromarray(image_np)

    if crop_region:
        x, y, w, h = crop_region
        image = image.crop((x, y, x + w, y + h))

    if resize_dims:
        image = image.resize(resize_dims, Image.LANCZOS)

    if enhance:
        # Bug fix: ImageEnhance was referenced here without ever being
        # imported, so enhance=True raised NameError. Now imported at the top.
        image = ImageEnhance.Contrast(image).enhance(1.2)
        image = ImageEnhance.Sharpness(image).enhance(1.1)

    return image

class GroqNode:
    """Shared helpers for GROQ nodes: prompt loading, tensor conversion, encoding."""

    @classmethod
    def load_prompt_options(cls, prompt_files):
        """Merge {name: content} prompt entries from the given JSON files.

        Paths are resolved relative to ``../groq`` next to this module; files
        that are missing, unreadable, or malformed are skipped with a console
        warning instead of aborting the load.
        """
        options = {}
        base_dir = os.path.dirname(os.path.realpath(__file__))

        for file_path in prompt_files:
            full_path = os.path.join(base_dir, '..', 'groq', file_path)
            try:
                if not os.path.exists(full_path):
                    continue
                with open(full_path, 'r') as handle:
                    entries = json.load(handle)
                if isinstance(entries, list):
                    for entry in entries:
                        if 'name' in entry and 'content' in entry:
                            options[entry['name']] = entry['content']
            except Exception as e:
                print(f"Error loading prompts from {file_path}: {str(e)}")
        return options

    def get_prompt_content(self, prompt_name, prompt_options):
        """Look up *prompt_name* in *prompt_options*, defaulting to ''."""
        return prompt_options.get(prompt_name, "")

    def tensor_to_pil(self, image_tensor):
        """Convert an HWC float tensor (values in [0, 1]) to a PIL Image.

        A leading batch dimension of size 1 is squeezed away first; any other
        shape raises ValueError.
        """
        tensor = image_tensor
        if tensor.dim() == 4 and tensor.shape[0] == 1:
            tensor = tensor.squeeze(0)

        if not (tensor.dim() == 3 and tensor.shape[2] == 3):
            raise ValueError(f"Unsupported image tensor shape: {tensor.shape}")

        array = (tensor.cpu().numpy() * 255).astype(np.uint8)
        return Image.fromarray(array)

    def encode_image(self, image_pil):
        """Serialize a PIL Image to a base64 JPEG string; None on failure."""
        try:
            buffer = BytesIO()
            image_pil.save(buffer, format="JPEG")
            return base64.b64encode(buffer.getvalue()).decode('utf-8')
        except Exception as e:
            print(f"Error encoding image: {e}")
            return None

class GroqLLMNode:
    """Advanced GROQ LLM Node with enhanced features for text generation.

    Declares the full ComfyUI input schema in INPUT_TYPES and executes the
    chat-completion request in `generate`.
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Describe the node's inputs for the ComfyUI front end."""
        # Model descriptions double as tooltips for the model dropdown.
        model_descriptions = get_model_descriptions()
        text_models = get_model_choices(ModelType.TEXT)

        return {
            "required": {
                "api_key": ("STRING", {
                    "default": DEFAULT_API_KEY, 
                    "multiline": False, 
                    "tooltip": "Your GROQ API key. Leave empty to use GROQ_API_KEY environment variable."
                }),
                "model": (text_models, {
                    "default": "llama3-70b-8192",
                    "tooltip": model_descriptions.get("llama3-70b-8192", "Select a text generation model")
                }),
                "prompt": ("STRING", {
                    "multiline": True, 
                    "default": "",
                    "tooltip": "Your input prompt or question"
                }),
                "system_message": ("STRING", {
                    "multiline": True, 
                    "default": "You are a helpful AI assistant with deep knowledge and analytical skills.",
                    "tooltip": "System message to guide the model's behavior"
                }),
                "conversation_history": ("STRING", {
                    "multiline": True, 
                    "default": "",
                    "tooltip": "Previous conversation history (format: 'user: message\nassistant: response')"
                }),
                "temperature": ("FLOAT", {
                    "default": DEFAULT_TEMPERATURE, 
                    "min": 0.0, 
                    "max": 2.0, 
                    "step": 0.05,
                    "tooltip": "Lower values make output more deterministic, higher more creative"
                }),
                "max_tokens": ("INT", {
                    "default": DEFAULT_MAX_TOKENS, 
                    "min": 1, 
                    "max": 8192, 
                    "step": 1,
                    "tooltip": "Maximum number of tokens to generate"
                }),
                "top_p": ("FLOAT", {
                    "default": DEFAULT_TOP_P, 
                    "min": 0.1, 
                    "max": 1.0, 
                    "step": 0.01,
                    "tooltip": "Nucleus sampling: consider only the top tokens with cumulative probability >= top_p"
                }),
                "frequency_penalty": ("FLOAT", {
                    "default": 0.0, 
                    "min": -2.0, 
                    "max": 2.0, 
                    "step": 0.1,
                    "tooltip": "Positive values penalize new tokens based on existing frequency"
                }),
                "presence_penalty": ("FLOAT", {
                    "default": 0.0, 
                    "min": -2.0, 
                    "max": 2.0, 
                    "step": 0.1,
                    "tooltip": "Positive values penalize new tokens based on whether they appear in the text so far"
                }),
                "seed": ("INT", {
                    "default": -1, 
                    "min": -1, 
                    "max": 2**32-1,
                    "tooltip": "Random seed (-1 for random)"
                }),
                "format_output": (["plain", "markdown", "json", "html", "yaml"], {
                    "default": "markdown",
                    "tooltip": "Format for the output"
                }),
                "enable_function_calling": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "context_documents": ("LIST", {"default": []}),
                "stop_sequences": ("STRING", {"default": ""}),
            },
            # NOTE(review): the hidden "prompt" key shadows the required "prompt"
            # input above — ComfyUI merges these namespaces, so one of the two
            # should likely be renamed. Left as-is to avoid breaking saved
            # workflows; confirm intended behavior.
            "hidden": {
                "prompt": "PROMPT",
                "extra_pnginfo": "EXTRA_PNGINFO",
                "unique_id": "UNIQUE_ID",
            }
        }
    
    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("response",)
    FUNCTION = "generate"
    CATEGORY = "ComfyGroq/Text"

    def generate(self, api_key, model, prompt, system_message, temperature, max_tokens,
                 top_p, seed, conversation_history="", frequency_penalty=0.0,
                 presence_penalty=0.0, format_output="markdown",
                 enable_function_calling=False, context_documents=None,
                 stop_sequences="", **kwargs):
        """Run a chat completion and return the response text as a 1-tuple.

        Bug fixes vs. the original:
        - INPUT_TYPES declares conversation_history, the penalties,
          format_output, enable_function_calling and the optional inputs, but
          the old signature did not accept them, so ComfyUI's keyword dispatch
          raised TypeError. They are now accepted (with defaults, so direct
          callers of the old 9-argument form still work) and forwarded.
        - np.random.seed(-1) raises ValueError, yet -1 is the declared default
          ("random"); a concrete seed is now drawn first.

        Raises:
            ValueError: if no API key is provided and GROQ_API_KEY is unset.
        """
        # A seed of -1 means "pick one at random": np.random.seed rejects
        # negatives, so draw a concrete seed and use it everywhere.
        if seed is None or seed < 0:
            seed = random.randint(0, 2**32 - 1)
        torch.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)

        # Use provided API key or fall back to environment variable.
        api_key = (api_key or "").strip() or os.getenv('GROQ_API_KEY')
        if not api_key:
            raise ValueError("No API key provided. Please set GROQ_API_KEY environment variable or provide it in the node.")

        # Fold optional context documents and the output-format request into
        # the system message so the model sees them up front.
        if context_documents:
            docs = "\n".join(str(doc) for doc in context_documents)
            system_message = f"{system_message}\n\nContext documents:\n{docs}"
        if format_output and format_output != "plain":
            system_message = f"{system_message}\n\nFormat your response as {format_output}."

        messages = [{"role": "system", "content": system_message}]
        # Replay prior turns ("user: ..." / "assistant: ..." lines) so the
        # model sees the conversation context.
        for line in (conversation_history or "").splitlines():
            line = line.strip()
            lowered = line.lower()
            if lowered.startswith("user:"):
                messages.append({"role": "user", "content": line[5:].strip()})
            elif lowered.startswith("assistant:"):
                messages.append({"role": "assistant", "content": line[10:].strip()})
        messages.append({"role": "user", "content": prompt})

        # Comma-separated stop sequences; None when the field is empty.
        stop = [s.strip() for s in (stop_sequences or "").split(",") if s.strip()] or None

        # NOTE: enable_function_calling is accepted for forward compatibility;
        # tool wiring is not implemented yet.
        client = Groq(api_key=api_key)
        try:
            response = client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                top_p=top_p,
                frequency_penalty=frequency_penalty,
                presence_penalty=presence_penalty,
                stop=stop,
                seed=seed,
            )
            return (response.choices[0].message.content,)
        except Exception as e:
            # Surface API errors as the node's output rather than crashing the graph.
            return (f"Error: {str(e)}",)

class GroqVisionNode:
    """Advanced GROQ Vision Node with enhanced image analysis capabilities."""

    @classmethod
    def INPUT_TYPES(cls):
        """Describe the node's inputs for the ComfyUI front end."""
        vision_models = get_model_choices(ModelType.VISION)

        return {
            "required": {
                "api_key": ("STRING", {
                    "default": DEFAULT_API_KEY, 
                    "multiline": False, 
                    "tooltip": "Your GROQ API key. Leave empty to use GROQ_API_KEY environment variable."
                }),
                "model": (vision_models, {
                    "default": "llava-v1.6-34b-4096-preview",
                    "tooltip": "Select a vision-language model"
                }),
                "images": ("IMAGE", {
                    "tooltip": "One or more images to analyze"
                }),
                "prompt": ("STRING", {
                    "multiline": True, 
                    "default": "Describe this image in detail, paying attention to key elements, text, and overall composition.",
                    "tooltip": "Your question or instruction about the image(s)"
                }),
                "analysis_type": (["describe", "qa", "extract_text", "analyze_style", "compare"], {
                    "default": "describe",
                    "tooltip": "Type of analysis to perform"
                }),
                "temperature": ("FLOAT", {
                    "default": 0.2, 
                    "min": 0.0, 
                    "max": 1.0, 
                    "step": 0.05,
                    "tooltip": "Lower values make output more focused and deterministic"
                }),
                "max_tokens": ("INT", {
                    "default": 1024, 
                    "min": 1, 
                    "max": 4096, 
                    "step": 1,
                    "tooltip": "Maximum number of tokens to generate"
                }),
                "detail_level": (["low", "medium", "high"], {
                    "default": "medium",
                    "tooltip": "Level of detail in the analysis"
                }),
                "extract_metadata": ("BOOLEAN", {"default": True}),
            },
            "optional": {
                "reference_image": ("IMAGE", {"tooltip": "Reference image for comparison"}),
                "crop_region": ("TUPLE", {
                    "default": None,
                    "tooltip": "(x, y, width, height) to analyze a specific region"
                }),
            },
            # NOTE(review): the hidden "prompt" key shadows the required
            # "prompt" input above — confirm which one is intended.
            "hidden": {
                "prompt": "PROMPT",
                "extra_pnginfo": "EXTRA_PNGINFO",
                "unique_id": "UNIQUE_ID",
            }
        }
    
    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("description",)
    FUNCTION = "analyze_image"
    CATEGORY = "ComfyGroq/Vision"

    def analyze_image(self, api_key, model, images, prompt, analysis_type="describe",
                      temperature=0.2, max_tokens=1024, detail_level="medium",
                      extract_metadata=True, reference_image=None, crop_region=None,
                      **kwargs):
        """Send the first image plus *prompt* to a GROQ vision model.

        Bug fixes vs. the original:
        - the parameter was named `image` while INPUT_TYPES declares `images`,
          so ComfyUI's keyword dispatch raised TypeError;
        - several inputs declared in INPUT_TYPES (analysis_type, detail_level,
          extract_metadata, crop_region, ...) were missing from the signature;
        - it called self.tensor_to_pil, which this class never defines
          (it does not inherit GroqNode) — the conversion is now inlined.
        analysis_type / detail_level / extract_metadata / reference_image are
        accepted for interface completeness but not yet wired into the request.

        Raises:
            ValueError: if no API key is available or the tensor shape is unsupported.
        """
        # Fail fast on missing credentials before doing any image work.
        api_key = (api_key or "").strip() or os.getenv('GROQ_API_KEY')
        if not api_key:
            raise ValueError("No API key provided. Please set GROQ_API_KEY environment variable or provide it in the node.")

        # ComfyUI IMAGE tensors are (batch, H, W, C) floats in [0, 1];
        # analyze the first image of the batch.
        tensor = images[0] if images.dim() == 4 else images
        if tensor.dim() != 3 or tensor.shape[2] != 3:
            raise ValueError(f"Unsupported image tensor shape: {tensor.shape}")
        array = (tensor.cpu().numpy() * 255).astype(np.uint8)
        pil_image = Image.fromarray(array)

        # Optional (x, y, width, height) region declared in INPUT_TYPES;
        # previously accepted in the schema but never applied.
        if crop_region:
            x, y, w, h = crop_region
            pil_image = pil_image.crop((x, y, x + w, y + h))

        # Encode image to base64 JPEG for the data-URL payload.
        buffered = BytesIO()
        pil_image.save(buffered, format="JPEG")
        img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')

        client = Groq(api_key=api_key)
        try:
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": prompt},
                            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}}
                        ]
                    }
                ],
                temperature=temperature,
                max_tokens=max_tokens,
            )
            return (response.choices[0].message.content,)
        except Exception as e:
            # Surface API errors as the node's output rather than crashing the graph.
            return (f"Error: {str(e)}",)

class GroqDocumentAnalyzer:
    """Node for analyzing documents with GROQ.

    Runs the document through a GROQ chat model with a task-specific
    instruction and returns the analysis text plus any extracted items.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "api_key": ("STRING", {"default": DEFAULT_API_KEY, "multiline": False}),
                "document_text": ("STRING", {"multiline": True, "default": ""}),
                "analysis_type": (["summarize", "extract_key_points", "qa", "sentiment", "entities"], {"default": "summarize"}),
                "temperature": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.05}),
                "max_tokens": ("INT", {"default": 1024, "min": 100, "max": 4096}),
            }
        }
    
    RETURN_TYPES = ("STRING", "LIST")
    RETURN_NAMES = ("analysis", "extracted_data")
    FUNCTION = "analyze_document"
    CATEGORY = "ComfyGroq/Documents"

    # Instruction given to the model per analysis type. Extraction tasks ask
    # for one item per line so the list output can be parsed without JSON.
    _INSTRUCTIONS = {
        "summarize": "Summarize the following document concisely.",
        "extract_key_points": "List the key points of the following document, one per line, each prefixed with '- '.",
        "qa": "Answer the question(s) contained in the following document as accurately as possible.",
        "sentiment": "Classify the sentiment of the following document (positive/negative/neutral) and briefly justify the classification.",
        "entities": "List the named entities (people, places, organizations, dates) found in the following document, one per line, each prefixed with '- '.",
    }

    def analyze_document(self, api_key, document_text, analysis_type, temperature, max_tokens):
        """Analyze *document_text* and return (analysis, extracted_data).

        Fixes the original stub, which returned None even though RETURN_TYPES
        promises a ("STRING", "LIST") pair — downstream nodes would crash.

        Raises:
            ValueError: if no API key is provided and GROQ_API_KEY is unset.
        """
        # Nothing to analyze: return the empty result shape ComfyUI expects.
        if not document_text or not document_text.strip():
            return ("", [])

        api_key = (api_key or "").strip() or os.getenv('GROQ_API_KEY')
        if not api_key:
            raise ValueError("No API key provided. Please set GROQ_API_KEY environment variable or provide it in the node.")

        system_message = self._INSTRUCTIONS.get(analysis_type, self._INSTRUCTIONS["summarize"])

        client = Groq(api_key=api_key)
        try:
            response = client.chat.completions.create(
                model="llama3-70b-8192",
                messages=[
                    {"role": "system", "content": system_message},
                    {"role": "user", "content": document_text},
                ],
                temperature=temperature,
                max_tokens=max_tokens,
            )
            analysis = response.choices[0].message.content
        except Exception as e:
            # Surface API errors as output rather than crashing the graph.
            return (f"Error: {str(e)}", [])

        # For list-producing tasks, also parse the bulleted lines into a list.
        extracted = []
        if analysis_type in ("extract_key_points", "entities"):
            extracted = [line.strip().lstrip("-• ").strip()
                         for line in analysis.splitlines()
                         if line.strip().startswith("-")]
        return (analysis, extracted)

class GroqCodeAssistant:
    """Node for code generation and analysis with GROQ.

    Asks a code model for a fenced code block (plus optional explanation)
    and splits the response into (code, explanation).
    """

    @classmethod
    def INPUT_TYPES(cls):
        code_models = [m.id for m in AVAILABLE_MODELS if m.type == ModelType.CODE]
        
        return {
            "required": {
                "api_key": ("STRING", {"default": DEFAULT_API_KEY, "multiline": False}),
                "model": (code_models, {"default": "codeqwen-7b"}),
                "task": ("STRING", {
                    "default": "Generate a Python function that sorts a list of dictionaries by a specified key",
                    "multiline": True
                }),
                "language": (["python", "javascript", "typescript", "java", "c++", "go", "rust"], {"default": "python"}),
                "temperature": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}),
                "max_tokens": ("INT", {"default": 2048, "min": 100, "max": 8192}),
                "include_explanation": ("BOOLEAN", {"default": True}),
            }
        }
    
    RETURN_TYPES = ("STRING", "STRING")
    RETURN_NAMES = ("code", "explanation")
    FUNCTION = "generate_code"
    CATEGORY = "ComfyGroq/Code"

    def generate_code(self, api_key, model, task, language, temperature, max_tokens, include_explanation):
        """Generate code for *task* and return (code, explanation).

        Fixes the original stub, which returned None even though RETURN_TYPES
        promises a ("STRING", "STRING") pair — downstream nodes would crash.

        Raises:
            ValueError: if no API key is provided and GROQ_API_KEY is unset.
        """
        # No task: return the empty result shape ComfyUI expects.
        if not task or not task.strip():
            return ("", "")

        api_key = (api_key or "").strip() or os.getenv('GROQ_API_KEY')
        if not api_key:
            raise ValueError("No API key provided. Please set GROQ_API_KEY environment variable or provide it in the node.")

        system_message = (
            f"You are an expert {language} programmer. "
            "Return the solution inside a single fenced code block"
            + (" followed by a brief explanation." if include_explanation else " and nothing else.")
        )

        client = Groq(api_key=api_key)
        try:
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_message},
                    {"role": "user", "content": task},
                ],
                temperature=temperature,
                max_tokens=max_tokens,
            )
            content = response.choices[0].message.content
        except Exception as e:
            # Surface API errors in the explanation slot rather than crashing.
            return ("", f"Error: {str(e)}")

        # Split the fenced code block out of the response; everything outside
        # the fence is treated as explanation text.
        match = re.search(r"```[\w+#-]*\n(.*?)```", content, re.DOTALL)
        if match:
            code = match.group(1).strip()
            explanation = (content[:match.start()] + content[match.end():]).strip()
        else:
            code, explanation = content.strip(), ""
        return (code, explanation if include_explanation else "")

class GroqAudioProcessor:
    """Node for audio processing with GROQ (transcription, translation, etc.).

    Encodes the incoming audio as 16-bit mono WAV, sends it to GROQ's
    Whisper endpoint, and optionally post-processes the transcript with a
    chat model for the summarize/sentiment tasks.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "api_key": ("STRING", {"default": DEFAULT_API_KEY, "multiline": False}),
                "audio_data": ("AUDIO", {}),
                "task": (["transcribe", "translate", "summarize", "sentiment"], {"default": "transcribe"}),
                "language": ("STRING", {"default": "en", "tooltip": "ISO language code (e.g., 'en', 'es', 'fr')"}),
                "temperature": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0}),
            }
        }
    
    RETURN_TYPES = ("STRING", "FLOAT")
    RETURN_NAMES = ("text", "confidence")
    FUNCTION = "process_audio"
    CATEGORY = "ComfyGroq/Audio"

    def process_audio(self, api_key, audio_data, task, language, temperature):
        """Process *audio_data* and return (text, confidence).

        Fixes the original stub, which returned None even though RETURN_TYPES
        promises a ("STRING", "FLOAT") pair — downstream nodes would crash.

        Raises:
            ValueError: if no API key is provided and GROQ_API_KEY is unset.
        """
        # No audio supplied: return the empty result shape ComfyUI expects.
        if not audio_data:
            return ("", 0.0)

        api_key = (api_key or "").strip() or os.getenv('GROQ_API_KEY')
        if not api_key:
            raise ValueError("No API key provided. Please set GROQ_API_KEY environment variable or provide it in the node.")

        # NOTE(review): assumes the ComfyUI AUDIO convention of
        # {"waveform": float tensor (batch, channels, samples) in [-1, 1],
        #  "sample_rate": int} — confirm against the loader feeding this node.
        waveform = audio_data["waveform"]
        sample_rate = int(audio_data["sample_rate"])

        # First batch item, averaged down to mono 16-bit PCM.
        mono = waveform[0].mean(dim=0)
        samples = (mono.clamp(-1.0, 1.0) * 32767).to(torch.int16).cpu().numpy()

        buffer = BytesIO()
        with wave.open(buffer, "wb") as wav:
            wav.setnchannels(1)
            wav.setsampwidth(2)  # 16-bit samples
            wav.setframerate(sample_rate)
            wav.writeframes(samples.tobytes())
        buffer.seek(0)

        client = Groq(api_key=api_key)
        try:
            if task == "translate":
                result = client.audio.translations.create(
                    file=("audio.wav", buffer),
                    model="whisper-large-v3",
                    temperature=temperature,
                )
            else:
                result = client.audio.transcriptions.create(
                    file=("audio.wav", buffer),
                    model="whisper-large-v3",
                    language=language,
                    temperature=temperature,
                )
            text = result.text
            if task in ("summarize", "sentiment"):
                # Second pass: run the transcript through a chat model.
                follow_up = client.chat.completions.create(
                    model="llama3-70b-8192",
                    messages=[
                        {"role": "system", "content": f"Perform a {task} of the following transcript."},
                        {"role": "user", "content": text},
                    ],
                    temperature=temperature,
                    max_tokens=1024,
                )
                text = follow_up.choices[0].message.content
            # The GROQ speech API does not expose a confidence score; report
            # 1.0 on success so downstream thresholds treat the text as usable.
            return (text, 1.0)
        except Exception as e:
            # Surface API errors as output rather than crashing the graph.
            return (f"Error: {str(e)}", 0.0)

# Node class mappings
# Keys are the internal identifiers ComfyUI registers; values are the node classes.
NODE_CLASS_MAPPINGS = {
    "GroqLLM": GroqLLMNode,
    "GroqVision": GroqVisionNode,
    "GroqDocumentAnalyzer": GroqDocumentAnalyzer,
    "GroqCodeAssistant": GroqCodeAssistant,
    "GroqAudioProcessor": GroqAudioProcessor,
}

# Node display names
# Human-readable titles shown in the ComfyUI node picker, keyed as above.
NODE_DISPLAY_NAME_MAPPINGS = {
    "GroqLLM": "GROQ LLM (Advanced)",
    "GroqVision": "GROQ Vision (VLM)",
    "GroqDocumentAnalyzer": "GROQ Document Analyzer",
    "GroqCodeAssistant": "GROQ Code Assistant",
    "GroqAudioProcessor": "GROQ Audio Processor",
}

# Web directory for frontend components (relative to this module)
WEB_DIRECTORY = "./web"

# Export nodes
__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
