"""
S4 Custom Prompts from Janus Pro Node  
Author: S4MUEL
GitHub: https://github.com/S4MUEL-404/ComfyUI-S4API

Generate prompts from images using local Janus-Pro models with dynamic model path detection.
"""

import os
import io
import json
import hashlib
import folder_paths
from typing import Optional
from PIL import Image
import torch
import numpy as np
from ..s4_logger import S4APILogger, log_api_start, log_api_success, log_cache_hit, log_cache_miss, log_processing_step


class S4PromptsFromJanusPro:
    """
    Generate text prompts from images using local Janus-Pro models (offline inference).

    Loaded models/processors are cached at class level so the weights are read
    at most once per process. Generated text is additionally cached on disk,
    keyed by all inputs, but only when seed > 0 (i.e. when the run is meant to
    be reproducible).
    """

    # Class-level caches shared by every node instance, keyed by model path,
    # so the (large) model/processor pair is loaded at most once per process.
    _GLOBAL_MODEL_CACHE = {}
    _GLOBAL_PROCESSOR_CACHE = {}

    def __init__(self):
        # Disk cache lives at <package root>/cache; create it eagerly so the
        # cache helpers never have to handle a missing directory.
        self.cache_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "cache")
        os.makedirs(self.cache_dir, exist_ok=True)

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the ComfyUI input sockets and widgets for this node."""
        return {
            "required": {
                "image": (
                    "IMAGE", 
                    {
                        "tooltip": "Input image for prompt generation"
                    }
                ),
                "model_variant": (
                    ["Janus-Pro-1B", "Janus-Pro-7B"], 
                    {
                        "default": "Janus-Pro-1B",
                        "tooltip": "Choose Janus-Pro model variant"
                    }
                ),
                "system_prompt": (
                    "STRING", 
                    {
                        "default": "Describe this image in detail, focusing on the visual elements, composition, and style.", 
                        "multiline": True,
                        "tooltip": "System prompt to guide the analysis"
                    }
                ),
                "max_tokens": (
                    "INT", 
                    {
                        "default": 768, 
                        "min": 1, 
                        "max": 4096,
                        "tooltip": "Maximum tokens to generate"
                    }
                ),
                "temperature": (
                    "FLOAT", 
                    {
                        "default": 0.6, 
                        "min": 0.0, 
                        "max": 2.0, 
                        "step": 0.1,
                        "tooltip": "Sampling temperature for creativity"
                    }
                ),
                "seed": (
                    "INT", 
                    {
                        "default": 0, 
                        "min": 0, 
                        "max": 0xffffffffffffffff,
                        "step": 1,
                        "display": "number",
                        "control_after_generate": True,
                        "tooltip": "Random seed for reproducible results"
                    }
                ),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("generated_prompts",)
    FUNCTION = "analyze_image"
    CATEGORY = "💀PromptsO"
    DESCRIPTION = "Generate text prompts from images using local Janus-Pro models with dynamic model path detection."

    def get_model_path(self, model_variant: str) -> str:
        """Resolve the on-disk path of the requested Janus-Pro variant.

        Looks under ComfyUI's models directory (models/Janus-Pro/<variant>),
        falling back to a list of known locations when ``folder_paths`` does
        not expose ``models_dir``.

        Raises:
            ValueError: If no models directory or no model folder is found.
        """
        # Preferred path: ask ComfyUI where its models directory is.
        try:
            models_dir = folder_paths.models_dir
            S4APILogger.debug("JanusProModel", f"ComfyUI models directory: {models_dir}")
        except Exception:  # narrowed from a bare except; fallback behavior unchanged
            # Fallback: probe a few likely locations relative to the install
            # (plus two hard-coded user paths kept for backward compatibility).
            possible_paths = [
                os.path.join(os.path.dirname(folder_paths.__file__), "..", "models"),
                os.path.join(os.path.dirname(os.path.dirname(__file__)), "..", "..", "models"),
                "D:\\AI\\Models",  # User's specific path
                "C:\\iCloud_Drive\\AI\\Models"  # Alternative example path
            ]
            
            models_dir = None
            for path in possible_paths:
                if os.path.exists(path):
                    models_dir = os.path.abspath(path)
                    # Use the module logger instead of a bare print for consistency.
                    S4APILogger.debug("JanusProModel", f"Found models directory: {models_dir}")
                    break
        
        if not models_dir:
            raise ValueError("Could not locate ComfyUI models directory")
        
        # Build path to Janus-Pro model: <models>/Janus-Pro/<variant>
        model_path = os.path.join(models_dir, "Janus-Pro", model_variant)
        
        if not os.path.exists(model_path):
            raise ValueError(f"Janus-Pro model not found at: {model_path}")
        
        return model_path

    def tensor_to_pil(self, image_tensor) -> "Image.Image":
        """Convert a ComfyUI image tensor to a PIL RGB image.

        Assumes the ComfyUI convention of float values in [0, 1] with an
        optional leading batch dimension — TODO confirm for exotic inputs.
        Only the first image of a batch is used.

        Raises:
            ValueError: If the tensor cannot be converted.
        """
        try:
            if len(image_tensor.shape) == 4:
                # Batch of images, take first one
                img_tensor = image_tensor[0]
            else:
                img_tensor = image_tensor
            
            # Clip before casting: float tensors can stray slightly outside
            # [0, 1], and a raw uint8 cast would wrap those values around.
            img_array = np.clip(img_tensor.cpu().numpy() * 255.0, 0, 255).astype('uint8')
            pil_image = Image.fromarray(img_array)
            
            # Janus expects RGB input
            if pil_image.mode != "RGB":
                pil_image = pil_image.convert("RGB")
            
            return pil_image
        
        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise ValueError(f"Failed to convert tensor to PIL image: {str(e)}") from e

    def resize_image(self, pil_image: "Image.Image", max_side: int = 1024) -> "Image.Image":
        """Downscale so the longest side is at most ``max_side``, keeping aspect ratio.

        Images already small enough are returned unchanged (no upscaling).
        """
        width, height = pil_image.size
        
        if max(width, height) <= max_side:
            return pil_image
        
        if width > height:
            new_width = max_side
            new_height = int(height * max_side / width)
        else:
            new_height = max_side
            new_width = int(width * max_side / height)
        
        return pil_image.resize((new_width, new_height), Image.Resampling.LANCZOS)

    def generate_cache_key(self, model_variant: str, system_prompt: str, image_tensor, 
                          max_tokens: int, temperature: float, seed: int) -> str:
        """Build a deterministic cache key from all generation inputs.

        The image contributes via a cheap fingerprint (shape + element sum)
        rather than a full content hash; MD5 is used only as a non-
        cryptographic digest here.
        """
        cache_data = {
            "model_variant": model_variant,
            "system_prompt": system_prompt.strip(),
            "max_tokens": max_tokens,
            "temperature": temperature,
            "seed": seed
        }
        
        # Cheap image fingerprint: shape + sum (collisions possible but rare
        # enough for a local result cache).
        image_hash = hashlib.md5(str(image_tensor.shape).encode() + 
                                str(image_tensor.sum().item()).encode()).hexdigest()[:8]
        cache_data["image_hash"] = image_hash
        
        # sort_keys makes the serialized form (and thus the key) deterministic.
        cache_str = json.dumps(cache_data, sort_keys=True)
        return hashlib.md5(cache_str.encode()).hexdigest()

    def load_from_cache(self, cache_key: str) -> Optional[str]:
        """Return the cached result for ``cache_key``, or None if absent/corrupt."""
        cache_file = os.path.join(self.cache_dir, f"{cache_key}_janus.json")
        if os.path.exists(cache_file):
            try:
                with open(cache_file, 'r', encoding='utf-8') as f:
                    cache_data = json.load(f)
                    return cache_data.get("result")
            except Exception as e:
                # A broken cache entry is non-fatal: warn and fall through to None.
                S4APILogger.warning("JanusProCache", f"Failed to load cache: {e}")
        return None

    def save_to_cache(self, cache_key: str, result: str) -> None:
        """Persist ``result`` under ``cache_key``; cache failures are non-fatal."""
        cache_file = os.path.join(self.cache_dir, f"{cache_key}_janus.json")
        try:
            cache_data = {
                "result": result,
                # Placeholder value; nothing reads this field back.
                "timestamp": json.dumps({"time": "cached"})
            }
            with open(cache_file, 'w', encoding='utf-8') as f:
                json.dump(cache_data, f, ensure_ascii=False, indent=2)
        except Exception as e:
            S4APILogger.warning("JanusProCache", f"Failed to save cache: {e}")

    def load_janus_model(self, model_path: str):
        """Load (or fetch from cache) the Janus-Pro processor and model.

        Returns:
            (processor, model) tuple; the model is on GPU (bf16) when CUDA is
            available, otherwise on CPU (fp32), and in eval mode.

        Raises:
            RuntimeError: With actionable instructions when the path is
                missing or loading fails.
        """
        cache_key = ("janus", model_path)
        
        # Check the process-wide caches first to avoid reloading weights.
        processor = self._GLOBAL_PROCESSOR_CACHE.get(cache_key)
        model = self._GLOBAL_MODEL_CACHE.get(cache_key)
        
        if processor is not None and model is not None:
            return processor, model
        
        # Fail early with download instructions if the model folder is absent.
        if not os.path.exists(model_path):
            error_msg = (
                f"❌ Model path does not exist: {model_path}\n\n"
                "Please download the Janus-Pro model:\n\n"
                "1. Using git clone:\n"
                "   git clone https://huggingface.co/deepseek-ai/Janus-Pro-1B\n"
                f"   and move to {model_path}\n\n"
                "2. Or download manually and place in correct directory.\n"
            )
            raise RuntimeError(error_msg)
        
        try:
            # Try native Janus loader first (imported lazily so the node can
            # be registered even when the janus package is not installed).
            from janus.models import MultiModalityCausalLM, VLChatProcessor
            
            S4APILogger.debug("JanusProModel", "Loading with native Janus processor...")
            
            if processor is None:
                processor = VLChatProcessor.from_pretrained(model_path, trust_remote_code=True)
                self._GLOBAL_PROCESSOR_CACHE[cache_key] = processor
            
            if model is None:
                device = "cuda" if torch.cuda.is_available() else "cpu"
                dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
                
                # NOTE(review): `dtype=` requires a recent transformers; older
                # versions spell this `torch_dtype=` — confirm against the
                # pinned dependency version.
                model = MultiModalityCausalLM.from_pretrained(
                    model_path, 
                    dtype=dtype,
                    trust_remote_code=True
                )
                model = model.to(device).eval()
                self._GLOBAL_MODEL_CACHE[cache_key] = model
            
            return processor, model
            
        except Exception as e:
            # If native Janus fails, provide clear instructions
            error_msg = (
                f"❌ Janus-Pro model loading failed: {str(e)}\n\n"
                "Possible solutions:\n\n"
                "1. Confirm model path is correct:\n"
                f"   {model_path}\n\n"
                "2. Confirm model files are complete:\n"
                "   - config.json\n"
                "   - pytorch_model.bin or model.safetensors\n"
                "   - tokenizer files\n\n"
                "3. Try re-downloading the model:\n"
                "   git clone https://huggingface.co/deepseek-ai/Janus-Pro-1B\n\n"
                "4. Check dependency versions:\n"
                "   pip list | grep -E 'transformers|torch|janus'\n"
            )
            raise RuntimeError(error_msg) from e

    def analyze_image(self, image, model_variant="Janus-Pro-1B", system_prompt="", 
                     max_tokens=768, temperature=0.6, seed=0):
        """Analyze an image with Janus-Pro and return the generated prompt text.

        Args:
            image: ComfyUI IMAGE tensor (optionally batched; first image used).
            model_variant: Folder name of the model under models/Janus-Pro.
            system_prompt: Instruction text sent alongside the image.
            max_tokens: Maximum number of new tokens to generate.
            temperature: Sampling temperature; 0 disables sampling (greedy).
            seed: RNG seed; when > 0 the run is seeded and results are cached.

        Returns:
            One-tuple containing the generated text (ComfyUI convention).

        Raises:
            ValueError: On any failure (path lookup, model load, inference).
        """
        # Log API call start
        params = {
            "model": model_variant,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "seed": seed,
            "prompt": system_prompt
        }
        log_api_start("JanusPro", "Janus-Pro Image Analysis", params)
        
        try:
            # Get model path
            log_processing_step("JanusPro", "Locating model")
            model_path = self.get_model_path(model_variant)
            S4APILogger.debug("JanusPro", f"Model path: {model_path}")
            
            # Generate cache key
            cache_key = self.generate_cache_key(model_variant, system_prompt, image, 
                                              max_tokens, temperature, seed)
            
            # The disk cache is only consulted for seeded (reproducible) runs.
            if seed > 0:
                cached_result = self.load_from_cache(cache_key)
                if cached_result:
                    log_cache_hit("JanusPro", cache_key)
                    return (cached_result,)
                else:
                    log_cache_miss("JanusPro", cache_key)
            
            # Convert image
            log_processing_step("JanusPro", "Processing image")
            pil_image = self.tensor_to_pil(image)
            pil_image = self.resize_image(pil_image, max_side=1024)
            S4APILogger.debug("JanusPro", f"Image size: {pil_image.size}")
            
            # Load model
            log_processing_step("JanusPro", "Loading Janus-Pro model")
            processor, model = self.load_janus_model(model_path)
            
            # Actually apply the seed: previously results were cached by seed
            # but the RNGs were never seeded, so sampling was not reproducible.
            # seed == 0 keeps the old "unseeded" behavior.
            if seed > 0:
                torch.manual_seed(seed)
                if torch.cuda.is_available():
                    torch.cuda.manual_seed_all(seed)
            
            # Prepare input. A real newline separates the image placeholder
            # from the prompt (was an escaped "\\n", i.e. a literal backslash-n).
            conversation = [
                {
                    "role": "<|User|>",
                    "content": f"<image_placeholder>\n{system_prompt}",
                    "images": [pil_image],
                },
                {"role": "<|Assistant|>", "content": ""},
            ]
            
            # Process inputs
            prepare_inputs = processor(
                conversations=conversation,
                images=[pil_image],
                force_batchify=True,
            )
            
            # Move inputs to wherever the model lives.
            device = next(model.parameters()).device
            prepare_inputs = prepare_inputs.to(device)
            
            # Generate
            log_processing_step("JanusPro", "Generating analysis")
            inputs_embeds = model.prepare_inputs_embeds(**prepare_inputs)
            
            with torch.no_grad():
                outputs = model.language_model.generate(
                    inputs_embeds=inputs_embeds,
                    attention_mask=prepare_inputs.attention_mask,
                    pad_token_id=processor.tokenizer.eos_token_id,
                    bos_token_id=processor.tokenizer.bos_token_id,
                    eos_token_id=processor.tokenizer.eos_token_id,
                    max_new_tokens=int(max_tokens),
                    # temperature == 0 means greedy decoding (do_sample=False).
                    do_sample=bool(temperature and temperature > 0.0),
                    temperature=float(temperature) if temperature > 0 else None,
                    top_p=0.95,
                    use_cache=True,
                )
            
            # Decode result
            analysis = processor.tokenizer.decode(
                outputs[0].cpu().tolist(), 
                skip_special_tokens=True
            ).strip()
            
            # Log success
            result_info = f"Generated {len(analysis)} characters"
            preview = analysis[:50] + "..." if len(analysis) > 50 else analysis
            result_info += f", Preview: {preview}"
            log_api_success("JanusPro", "Janus-Pro Image Analysis", result_info)
            
            # Save to cache (seeded runs only, matching the lookup above).
            if seed > 0:
                self.save_to_cache(cache_key, analysis)
                S4APILogger.cache("JanusPro", "Result saved to cache")
            
            return (analysis,)
            
        except Exception as e:
            error_msg = f"Janus-Pro analysis failed: {str(e)}"
            S4APILogger.error("JanusPro", error_msg)
            # Wrap in ValueError (the type callers catch) but keep the chain.
            raise ValueError(error_msg) from e
