"""
ComfyUI BuimenLabo - Unified Package
A comprehensive collection of ComfyUI custom nodes including:
- Multi-language locale toggle with 20 language support
- AI-powered pose analysis with Gemini integration
- Advanced LLM text generation with llama-cpp-python
- Smart ControlNet loader for SD/SDXL models
- AI-powered prompt translation

Author: BuimenLabo
Blog: https://note.com/hirodream44
"""

# Import version information
# NOTE(review): print_version_info is imported but never called in this module —
# presumably re-exported for consumers; confirm before removing.
try:
    from ._version import __version__, PACKAGE_NAME, AUTHOR, BLOG_URL, print_version_info
    print(f"🚀 {PACKAGE_NAME} v{__version__} - {AUTHOR}")
    print(f"📝 Blog: {BLOG_URL}")
except ImportError:
    # Fall back to a hard-coded version when _version.py is missing.
    __version__ = "1.0.0"
    print(f"🚀 ComfyUI BuimenLabo v{__version__}")

import os
import sys
import logging
import random

# Set up logging
# NOTE(review): basicConfig at import time configures the process-wide root
# logger; also, os/sys/random appear unused at module level (the route handlers
# below do their own local imports) — confirm before removing.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

print("🚀 Loading ComfyUI BuimenLabo unified package...")

# Import all packages as-is (simple copy approach).
# The five sub-modules below were loaded with five copy-pasted try/except
# sections; they now share one helper so the fallback logic lives in one place.
def _load_mappings(loader, label):
    """Call *loader* and return its (class_map, display_map) pair.

    A broken sub-module must never prevent the rest of the package from
    loading, so any exception is reported as a warning and a pair of empty
    dicts is returned instead (same messages and fallback as before).
    """
    try:
        class_map, display_map = loader()
        print(f"✅ {label} loaded")
        return class_map, display_map
    except Exception as e:
        print(f"⚠️ {label} failed: {e}")
        return {}, {}


def _load_gemini():
    # 1. Gemini Pose Analyzer
    from .gemini_pose_analyzer import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
    return NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS


def _load_llama():
    # 2. LlamaCpp Text (working version) — this module exposes bare node
    # classes, so the two mapping dicts are assembled here.
    from .llama_cpp_node import (
        LlamaCppLoader,
        LlamaCppGenerate,
        LlamaCppUnload,
        LlamaCppSafeUnload,
        LlamaCppMemoryInfo,
        LlamaCppCompleteUnload,
        LlamaCppAIO,
    )
    class_map = {
        "LlamaCppLoader": LlamaCppLoader,
        "LlamaCppGenerate": LlamaCppGenerate,
        "LlamaCppUnload": LlamaCppUnload,
        "LlamaCppSafeUnload": LlamaCppSafeUnload,
        "LlamaCppMemoryInfo": LlamaCppMemoryInfo,
        "LlamaCppCompleteUnload": LlamaCppCompleteUnload,
        "LlamaCppAIO": LlamaCppAIO,
    }
    display_map = {
        "LlamaCppLoader": "Llama-CPP Loader",
        "LlamaCppGenerate": "Llama-CPP Generate",
        "LlamaCppUnload": "Llama-CPP Unload VRAM",
        "LlamaCppSafeUnload": "Llama-CPP Safe Unload",
        "LlamaCppMemoryInfo": "Llama-CPP Memory Info",
        "LlamaCppCompleteUnload": "Llama-CPP Complete Unload",
        "LlamaCppAIO": "🚀 Llama-CPP All-In-One",
    }
    return class_map, display_map


def _load_controlnet():
    # 3. Multi ControlNet Loader
    from .multi_controlnet_loader import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
    return NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS


def _load_translator():
    # 4. Prompt Translator
    from .prompt_translator import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
    return NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS


def _load_locale():
    # 5. Locale Toggle
    from .locale_toggle import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
    return NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS


# Load order (and therefore console output order) is unchanged.
GEMINI_MAPPINGS, GEMINI_DISPLAY_MAPPINGS = _load_mappings(_load_gemini, "GeminiPoseAnalyzer")
LLAMA_MAPPINGS, LLAMA_DISPLAY_MAPPINGS = _load_mappings(_load_llama, "LlamaCppText")
CONTROLNET_MAPPINGS, CONTROLNET_DISPLAY_MAPPINGS = _load_mappings(_load_controlnet, "MultiControlNetLoader")
TRANSLATOR_MAPPINGS, TRANSLATOR_DISPLAY_MAPPINGS = _load_mappings(_load_translator, "PromptTranslator")
LOCALE_MAPPINGS, LOCALE_DISPLAY_MAPPINGS = _load_mappings(_load_locale, "LocaleToggle")

# Setup LlamaCpp routes (from working version)
def setup_llama_routes():
    """Register the /llama_cpp HTTP endpoints on ComfyUI's aiohttp server.

    Adds two POST routes via ComfyUI's standard PromptServer mechanism:
      * /llama_cpp/generate -- run one text-generation request.
      * /llama_cpp/unload   -- force-release loaded models from VRAM.

    Any failure (e.g. ComfyUI's ``server`` module not importable during an
    offline import) is caught and reported as a warning so importing this
    package never breaks.
    """
    try:
        from server import PromptServer
        from aiohttp import web
        import json  # NOTE(review): appears unused (web.json_response is used instead) -- confirm before removing

        routes = PromptServer.instance.routes

        @routes.post('/llama_cpp/generate')
        async def handle_llama_generation(request):
            """Handle one form-POST generation request and return a JSON payload."""
            try:
                print(f"🔄 Llama generation request received")

                # Get form data (every form value arrives as a string)
                data = await request.post()

                instruction_prompt = data.get('instruction_prompt', '')
                input_text = data.get('input_text', '')
                model_path = data.get('model_path', '')
                # Numeric/boolean fields are converted here, with defaults
                # applied when the field is absent.
                max_tokens = int(data.get('max_tokens', '128'))
                temperature = float(data.get('temperature', '0.8'))
                top_k = int(data.get('top_k', '40'))
                top_p = float(data.get('top_p', '0.95'))
                repeat_penalty = float(data.get('repeat_penalty', '1.1'))
                n_ctx = int(data.get('n_ctx', '2048'))
                n_gpu_layers = int(data.get('n_gpu_layers', '40'))
                nsfw_filter = data.get('nsfw_filter', 'true').lower() == 'true'
                auto_unload = data.get('auto_unload', 'true').lower() == 'true'

                print(f"📋 Request data: model={model_path}, text='{input_text[:50]}...', max_tokens={max_tokens}")

                # Reject requests that carry neither an instruction nor input text.
                if not input_text.strip() and not instruction_prompt.strip():
                    return web.json_response({'error': 'No input text provided'}, status=400)

                # Import llama functions lazily so the package loads even
                # without llama-cpp available.
                from .llama_cpp_node import _get_or_create_model, _cleanup_memory
                import random

                # Load model
                try:
                    llm = _get_or_create_model(model_path, n_ctx, n_gpu_layers, False)
                    print(f"✅ Model loaded for API: {model_path}")
                except Exception as e:
                    print(f"❌ Model loading failed: {e}")
                    return web.json_response({'error': f'Model loading failed: {str(e)}'}, status=500)

                # Generate text
                try:
                    # Build prompt (keep the instruction and the content clearly separated)
                    instruction = instruction_prompt if instruction_prompt else ""
                    content = input_text if input_text else "A beautiful scene"

                    # Fall back to a default instruction when the user supplied none
                    if not instruction.strip():
                        instruction = """You are a prompt generator for Stable Diffusion.

Task:
Convert the input description into ONE optimized English prompt for Stable Diffusion.
Rules:
- Output format: single line of comma-separated phrases only
- Stay faithful to the original description's content and intent
- Include: subject, setting/environment, lighting, mood/atmosphere, artistic style, quality tags
- Use descriptive English terms that accurately represent the input
- Do not censor or sanitize the content - convert faithfully
- End cleanly after final quality tag

Output:
<prompt>"""

                    # Candidate snippets for random variation (moderately tuned).
                    # NOTE(review): variation_prompts is never referenced below —
                    # confirm before removing.
                    variation_prompts = [
                        "Focus on artistic composition and visual details.",
                        "Emphasize mood and atmospheric elements.",
                        "Include creative styling and artistic elements.",
                        "Add detailed lighting and visual effects."
                    ]

                    quality_tags = [
                        "masterpiece, best quality, highly detailed",
                        "high quality, detailed artwork",
                        "ultra detailed, premium quality",
                        "best quality, intricate details"
                    ]

                    # Fixed internal system prompt
                    system_prompt = "You are a creative writing assistant. Respond in English."

                    # Apply NSFW filter if requested
                    if nsfw_filter:
                        safety_note = " IMPORTANT: If inappropriate content, respond with 'I cannot generate prompts for inappropriate content.' Otherwise, respond ONLY in English."
                        wrapped_prompt = f"""{system_prompt}

{instruction}{safety_note}

Input to convert: {content}

Response (English only):"""
                    else:
                        # Add only a minimal amount of randomness: roughly half
                        # the requests get a quality-tag hint appended.
                        if random.choice([True, False]):
                            quality_hint = random.choice(quality_tags)
                            wrapped_prompt = f"""{system_prompt}

{instruction}

Input to convert: {content}

Hint: Consider quality tags like "{quality_hint}"

Response (English only):"""
                        else:
                            wrapped_prompt = f"""{system_prompt}

{instruction}

Input to convert: {content}

Response (English only):"""

                    # Generate (fresh random seed per request, non-streaming)
                    seed = random.randint(1, 2**31 - 1)
                    response = llm.create_completion(
                        prompt=wrapped_prompt,
                        max_tokens=max_tokens,
                        temperature=temperature,
                        top_k=top_k,
                        top_p=top_p,
                        repeat_penalty=repeat_penalty,
                        seed=seed,
                        stream=False,
                        echo=False,
                        stop=["</s>"]
                    )

                    # llama-cpp-python returns a dict with "choices"; fall back
                    # to stringifying anything unexpected.
                    if isinstance(response, dict) and "choices" in response:
                        result_text = response["choices"][0]["text"]
                    else:
                        result_text = str(response)

                    # Clean up result: trim and cut at any stop token that
                    # leaked into the output.
                    result_text = result_text.strip()
                    for stop_token in ["</s>", "<eos>"]:
                        if stop_token in result_text:
                            result_text = result_text.split(stop_token)[0]

                    if not result_text or result_text.strip() == "":
                        # User-facing fallback message (Japanese: "No response was generated.")
                        result_text = "レスポンスが生成されませんでした。"

                    print(f"✅ Generation completed: {len(result_text)} characters")

                    # Auto unload if requested; failures here are non-fatal
                    # because the generation itself already succeeded.
                    if auto_unload:
                        print("🧹 Auto unloading VRAM...")
                        try:
                            _cleanup_memory()
                            print("✅ Auto unload completed")
                        except Exception as cleanup_e:
                            print(f"⚠️ Auto unload warning: {cleanup_e}")

                    return web.json_response({
                        'success': True,
                        'generated_text': result_text,
                        'input_text': f"{instruction}\n\n{content}",
                        'token_count': len(result_text.split()),
                        'model_path': model_path,
                        'auto_unload': auto_unload
                    })

                except Exception as e:
                    print(f"❌ Generation error: {e}")
                    return web.json_response({'error': f'Generation failed: {str(e)}'}, status=500)

            except Exception as e:
                print(f"❌ API error: {e}")
                import traceback
                traceback.print_exc()
                return web.json_response({
                    'success': False,
                    'error': str(e)
                }, status=500)

        @routes.post('/llama_cpp/unload')
        async def handle_llama_unload(request):
            """Force-free all model VRAM; returns a JSON success/error payload."""
            try:
                print(f"🗑️ Llama unload request received")

                # Import cleanup functions
                # NOTE(review): _cleanup_memory is imported but only the force
                # variant is called below -- confirm before removing.
                from .llama_cpp_node import _cleanup_memory, _force_cleanup_memory

                # Execute force memory cleanup for better VRAM release
                _force_cleanup_memory()

                print(f"✅ VRAM cleanup completed")

                return web.json_response({
                    'success': True,
                    'message': 'VRAM has been successfully freed. All models have been unloaded from memory.'
                })

            except Exception as e:
                print(f"❌ Unload API error: {e}")
                import traceback
                traceback.print_exc()
                return web.json_response({
                    'success': False,
                    'error': str(e)
                }, status=500)

        print("✅ Llama-CPP routes registered")

    except Exception as e:
        print(f"⚠️ Could not register llama routes: {e}")
# Register the HTTP routes at import time. A failure is non-fatal so that
# ComfyUI can still load the nodes even when route registration is impossible.
try:
    setup_llama_routes()
except Exception as exc:
    print(f"⚠️ BuimenLabo: Routes setup failed: {exc}")
else:
    print("🚀 BuimenLabo: Routes setup completed successfully")

# Combine all node mappings with BuimenLabo prefix.
# Previously five near-identical loop pairs; now one data-driven pass over
# (class mappings, display mappings, category emoji) triples. Insertion order,
# key names, and display strings are unchanged.
NODE_CLASS_MAPPINGS = {}
NODE_DISPLAY_NAME_MAPPINGS = {}

# Order matters: it determines registration/display order, same as before.
_NODE_SOURCES = [
    (GEMINI_MAPPINGS, GEMINI_DISPLAY_MAPPINGS, "🔍"),
    (LLAMA_MAPPINGS, LLAMA_DISPLAY_MAPPINGS, "🦙"),
    (CONTROLNET_MAPPINGS, CONTROLNET_DISPLAY_MAPPINGS, "🎛️"),
    (TRANSLATOR_MAPPINGS, TRANSLATOR_DISPLAY_MAPPINGS, "🌐"),
    (LOCALE_MAPPINGS, LOCALE_DISPLAY_MAPPINGS, "🌍"),
]

# Prefix every node id with "BuimenLabo_" to avoid clashes with other packs.
for _classes, _displays, _emoji in _NODE_SOURCES:
    for _name, _cls in _classes.items():
        NODE_CLASS_MAPPINGS[f"BuimenLabo_{_name}"] = _cls
    for _name, _display in _displays.items():
        NODE_DISPLAY_NAME_MAPPINGS[f"BuimenLabo_{_name}"] = f"{_emoji} BuimenLabo {_display}"

# Set WEB_DIRECTORY for JavaScript files
# ComfyUI serves frontend assets for this pack from the directory named here.
WEB_DIRECTORY = "./web"

# Package information
print("📦 ComfyUI BuimenLabo package loaded successfully!")
print(f"🎯 Total nodes: {len(NODE_CLASS_MAPPINGS)}")
print("📝 Blog: https://note.com/hirodream44")
print("🔗 Features: Multi-language, AI pose analysis, LLM generation, Smart ControlNet, Prompt translation")

# Export for ComfyUI
# These three names are the standard custom-node package interface that
# ComfyUI reads when importing this package.
__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS', 'WEB_DIRECTORY']