import gc
from pathlib import Path
import random
import inspect
import os
import sys

# Current directory for fallback imports
_current_dir = os.path.dirname(os.path.abspath(__file__))

def _detect_platform():
    """Detect current platform and return appropriate library path"""
    import platform
    system = platform.system().lower()
    machine = platform.machine().lower()
    
    if system == "windows":
        return "windows"
    elif system == "linux":
        return "linux"
    elif system == "darwin":
        return "macos"
    else:
        return "unknown"

def _try_import_llama():
    """Try to import llama-cpp-python, falling back to the bundled stub.

    Returns:
        tuple: (Llama class, source) where source is "system" when the
        installed llama-cpp-python package was found, or "fallback" when
        the local ``llama_fallback`` stub implementation is used.
    """
    platform_name = _detect_platform()
    print(f"Detected platform: {platform_name}")

    # Strategy 1: prefer a system-installed llama-cpp-python build.
    try:
        import llama_cpp
        from llama_cpp import Llama
        print(f"Successfully imported system llama-cpp-python from: {llama_cpp.__file__}")
        print(f"System version: {getattr(llama_cpp, '__version__', 'unknown')}")
        return Llama, "system"
    except ImportError:
        print("System llama-cpp-python not found")
        print("Please install llama-cpp-python:")
        print("For CPU: pip install llama-cpp-python")
        print("For GPU: CMAKE_ARGS='-DGGML_CUDA=on' pip install llama-cpp-python --force-reinstall --no-cache-dir")

    # Strategy 2: use the bundled fallback implementation for development.
    print("Using fallback implementation for development")
    print("Run install_precompiled.bat or install_to_comfyui.bat to install GPU version")
    # Only insert once: repeated calls previously prepended the same
    # directory to sys.path every time, bloating the search path.
    if _current_dir not in sys.path:
        sys.path.insert(0, _current_dir)
    from llama_fallback import LlamaFallback as Llama
    return Llama, "fallback"

# Import llama with fallback strategies.
# NOTE: these prints run at module import time so that import problems are
# immediately visible in the ComfyUI console log.
print("=" * 50)
print("[LlamaCppText] STARTING IMPORT PROCESS")
print(f"[LlamaCppText] Current working directory: {os.getcwd()}")
print(f"[LlamaCppText] Script directory: {_current_dir}")
print(f"[LlamaCppText] sys.path first 3 entries: {sys.path[:3]}")
print("=" * 50)

# Resolve the Llama class once; _import_source records whether the real
# package ("system") or the development stub ("fallback") is in use.
Llama, _import_source = _try_import_llama()

print("=" * 50)
print(f"[LlamaCppText] IMPORT RESULT: {_import_source}")
print(f"[LlamaCppText] Llama class: {Llama}")
print(f"[LlamaCppText] Llama module: {Llama.__module__ if hasattr(Llama, '__module__') else 'unknown'}")
print("=" * 50)
# ComfyUI-provided helpers for VRAM management and the models directory root.
from comfy.model_management import soft_empty_cache, unload_all_models
from folder_paths import models_dir

# Category under which these nodes appear in the ComfyUI node browser.
CATEGORY = "Llama-CPP"

def _ensure_llm_directory_exists():
    """Create models/llm if missing and drop user-facing guide files into it.

    Returns:
        Path: the models/llm directory path (whether or not creation succeeded).
    """
    llm_dir = Path(models_dir) / "llm"
    
    if not llm_dir.exists():
        try:
            llm_dir.mkdir(parents=True, exist_ok=True)
            print(f"📁 Created models/llm directory: {llm_dir}")
            
            # Write a model-placement guide file (Japanese end-user text;
            # the content below is runtime data and intentionally untranslated).
            guide_file = llm_dir / "📋_モデル配置ガイド.txt"
            
            guide_content = """🚀 Llama-CPP モデル配置ガイド
========================================

このフォルダにGGUFモデルファイルを配置してください。

📥 推奨モデル (Hugging Face):
----------------------------------------

🎯 日本語対応モデル:
• Qwen2.5-7B-Instruct (多言語対応、日本語強い)
  https://huggingface.co/bartowski/Qwen2.5-7B-Instruct-GGUF
  ファイル: Qwen2.5-7B-Instruct-Q8_0.gguf (推奨)

• Japanese-Gemma-2-9B (日本語特化)
  https://huggingface.co/bartowski/Japanese-Gemma-2-9B-GGUF
  ファイル: Japanese-Gemma-2-9B-Q6_K.gguf

• ELYZA-japanese-Llama-2-7b (日本語特化)
  https://huggingface.co/mmnga/ELYZA-japanese-Llama-2-7b-instruct-gguf
  ファイル: ELYZA-japanese-Llama-2-7b-instruct-q8_0.gguf

🌍 英語モデル:
• Llama-3.1-8B-Instruct (Meta公式)
  https://huggingface.co/bartowski/Meta-Llama-3.1-8B-Instruct-GGUF
  ファイル: Meta-Llama-3.1-8B-Instruct-Q8_0.gguf

• Mistral-7B-Instruct-v0.3 (高性能)
  https://huggingface.co/bartowski/Mistral-7B-Instruct-v0.3-GGUF
  ファイル: Mistral-7B-Instruct-v0.3-Q8_0.gguf

📋 ダウンロード手順:
----------------------------------------
1. 上記リンクをクリックしてHugging Faceページを開く
2. "Files and versions"タブをクリック
3. 推奨ファイル名(.gguf)をクリックしてダウンロード
4. このフォルダに保存する

💡 量子化レベルの選び方:
----------------------------------------
• Q8_0: 最高品質 (8GB以上のVRAM推奨)
• Q6_K: 高品質 (6GB以上のVRAM推奨)  
• Q4_K_M: 標準品質 (4GB以上のVRAM推奨)
• Q3_K_M: 軽量 (3GB以上のVRAM推奨)

🔧 使用方法:
----------------------------------------
1. .ggufファイルをこのフォルダに配置
2. ComfyUIを再起動
3. Llama-CPPノードでモデルを選択

⚠️ 注意事項:
----------------------------------------
• ファイル名に日本語は使わない
• ファイルサイズが大きいのでダウンロード時間に注意
• 初回読み込み時は時間がかかります

🎉 配置完了後はこのファイルを削除してもOKです！
"""
            
            with open(guide_file, 'w', encoding='utf-8') as f:
                f.write(guide_content)
            
            print(f"📋 Created model guide file: {guide_file}")
            
            # Also create a Windows .url shortcut that opens a Hugging Face
            # model search in the browser when double-clicked.
            web_link_file = llm_dir / "🔗_Hugging_Face_モデル検索.url"
            url_content = """[InternetShortcut]
URL=https://huggingface.co/models?library=gguf&sort=trending&search=instruct
"""
            with open(web_link_file, 'w', encoding='utf-8') as f:
                f.write(url_content)
            
            print(f"🔗 Created web link file: {web_link_file}")
            
        except Exception as e:
            # Best effort: a failure here only means the guide files are
            # missing; model loading can still work if the user creates
            # the directory manually.
            print(f"❌ Failed to create llm directory: {e}")
    
    return llm_dir

# Ensure the models/llm directory exists at module import time.
_ensure_llm_directory_exists()

# Module-level cache shared by all node instances: the last loader settings
# (used for auto-reload), the cached model instance, and its cache key.
_last_loader_settings = {}
_cached_model = None
_model_cache_key = None

def _generate_cache_key(model_path, n_ctx, n_gpu_layers):
    """設定からキャッシュキーを生成"""
    return f"{model_path}_{n_ctx}_{n_gpu_layers}"

def _get_or_create_model(model_path, n_ctx, n_gpu_layers, force_reload=False):
    """Return a model for these settings, reusing the module-level cache.

    Args:
        model_path: Path to the .gguf model file.
        n_ctx: Context window size passed to Llama.
        n_gpu_layers: Number of layers to offload to GPU.
        force_reload: When True, discard any cached model and load fresh.

    Returns:
        The (possibly cached) Llama instance.

    Raises:
        Exception: If loading fails or the freshly loaded model fails
        validation; the cache is cleared before re-raising.
    """
    global _cached_model, _model_cache_key, _last_loader_settings
    
    cache_key = _generate_cache_key(model_path, n_ctx, n_gpu_layers)
    
    # force_reload=True takes priority over everything: drop the cache and
    # free memory before loading.
    if force_reload:
        print(f"Force reloading model: {model_path}")
        # Completely clear the existing model
        _clear_cached_model()
        # Release memory
        _cleanup_memory()
        
    else:
        # Check whether an existing model with identical settings is usable.
        if _cached_model is not None and _model_cache_key == cache_key:
            print(f"Found cached model with matching key: {cache_key}")
            # Validate strictly (actually runs a tiny completion).
            if _validate_cached_model(_cached_model):
                print(f"Using cached model: {model_path}")
                return _cached_model
            else:
                print(f"Cached model is invalid, clearing and reloading: {model_path}")
                _clear_cached_model()
                _cleanup_memory()
        else:
            if _cached_model is not None:
                print(f"Cache key mismatch. Cached: {_model_cache_key}, Required: {cache_key}")
            else:
                print("No cached model found")
    
    # Load a new model.
    print(f"Loading new model: {model_path}")
    print(f"Settings - n_ctx: {n_ctx}, n_gpu_layers: {n_gpu_layers}")
    
    try:
        # NOTE(review): this pins CUDA_VISIBLE_DEVICES=0 process-wide on
        # every load — confirm this is intended on multi-GPU setups.
        import os
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        
        llm = Llama(
            model_path=model_path,
            n_ctx=n_ctx,
            n_gpu_layers=n_gpu_layers,
            verbose=True,  # Enable verbose to see GPU usage
            chat_format="chatml",
            # Additional CUDA-specific parameters
            use_mmap=True,
            use_mlock=False,
        )
        
        # Also validate the freshly loaded model before caching it.
        if not _validate_cached_model(llm):
            raise Exception("Newly loaded model failed validation")
        
        # Update the cache and remember the settings for auto-reload.
        _cached_model = llm
        _model_cache_key = cache_key
        _last_loader_settings = {
            "model_path": model_path,
            "n_ctx": n_ctx,
            "n_gpu_layers": n_gpu_layers
        }
        
        print("Model loaded, validated, and cached successfully")
        return llm
        
    except Exception as e:
        print(f"Model loading failed: {e}")
        _cached_model = None
        _model_cache_key = None
        raise e

def _clear_cached_model():
    """Completely clear the cached model, closing it first when possible."""
    global _cached_model, _model_cache_key

    # Nothing cached — nothing to do.
    if _cached_model is None:
        return

    try:
        # Invoke the model's close() method if it provides one.
        close_fn = getattr(_cached_model, 'close', None)
        if close_fn is not None:
            close_fn()
            print("Model.close() executed")
    except Exception as close_err:
        print(f"Error closing previous model: {close_err}")

    # Drop both the instance and its cache key.
    _cached_model = None
    _model_cache_key = None
    print("Cached model cleared")

def _cleanup_memory():
    """Run the full VRAM/RAM cleanup sequence.

    Steps: close and drop the cached model, run ComfyUI's memory management,
    empty the CUDA caches on every device, and finally run Python GC.
    Each step is best-effort; failures are printed, never raised.
    """
    global _cached_model, _model_cache_key
    
    print("=== Starting VRAM cleanup ===")
    
    # Step 1: Clear cached model
    if _cached_model is not None:
        try:
            print(f"Starting model unload process. Model type: {type(_cached_model)}")
            
            # Invoke the model's close() method if it provides one.
            if hasattr(_cached_model, 'close'):
                _cached_model.close()
                print("Model.close() executed")
                
            # Drop the model's native context reference.
            if hasattr(_cached_model, '_ctx'):
                _cached_model._ctx = None
                print("Model context cleared")
                
            # Clear the cache itself.
            _cached_model = None
            _model_cache_key = None
            print("Model cache cleared")
            
        except Exception as e:
            print(f"Error during model cleanup: {e}")
    else:
        print("No cached model to clean up")
    
    # Step 2: ComfyUI memory management
    try:
        print("ComfyUI memory management executing...")
        unload_all_models()
        soft_empty_cache()
        print("ComfyUI memory management executed")
    except Exception as e:
        print(f"ComfyUI memory cleanup error: {e}")
    
    # Step 3: CUDA Cache cleanup
    try:
        import torch
        if torch.cuda.is_available():
            print("CUDA cache clearing...")
            torch.cuda.empty_cache()
            torch.cuda.synchronize()
            
            # Empty the allocator cache on every CUDA device, not just device 0.
            for i in range(torch.cuda.device_count()):
                with torch.cuda.device(i):
                    torch.cuda.empty_cache()
            print("CUDA cache cleared")
        else:
            print("CUDA not available, skipping CUDA cleanup")
    except Exception as e:
        print(f"CUDA cleanup error: {e}")
    
    # Step 4: Python garbage collection
    try:
        collected = gc.collect()
        print(f"Total GC: {collected} objects collected")
    except Exception as e:
        print(f"GC error: {e}")
    
    # Step 5: Additional CUDA memory cleanup
    try:
        import torch
        if torch.cuda.is_available():
            print("All CUDA devices memory clearing...")
            torch.cuda.empty_cache()
            for i in range(torch.cuda.device_count()):
                torch.cuda.reset_peak_memory_stats(i)
            print("All CUDA devices memory cleared")
    except Exception as e:
        print(f"Additional CUDA cleanup error: {e}")
    
    # NOTE(review): most of the summary lines below are printed
    # unconditionally and do not reflect whether the steps above
    # actually succeeded — they are a fixed status banner.
    print("=== Unload Status ===")
    print("Model.close() executed" if _cached_model is None else "Model still in memory")
    print("Model context cleared")
    print("Model cache preserved for reuse" if _cached_model is None else "Model cache still active")
    print("ComfyUI memory management executed")
    print("CUDA cache cleared")
    print(f"Total GC: {collected if 'collected' in locals() else 0} objects collected")
    print("All CUDA devices memory cleared")
    print("VRAM release completed (cache preserved)")
    print("=" * 30)

def _force_cleanup_memory():
    """Forcefully release the cached model and VRAM (complete removal).

    Unlike _cleanup_memory(), the cache is never preserved: the model
    reference is dropped unconditionally and the cleanup passes are run
    several times to maximize the chance CUDA memory is actually returned.
    All failures are printed, never raised.
    """
    global _cached_model, _model_cache_key
    
    print("=== FORCE VRAM CLEANUP ===")
    
    # Step 1: forcefully drop the cached model.
    try:
        if _cached_model is not None:
            print(f"Force closing model: {type(_cached_model)}")
            
            # Release native resources through the public API. Do NOT call
            # __del__ directly (the original did): the GC invokes it again
            # after the reference is dropped, risking double finalization
            # of the native context.
            if hasattr(_cached_model, 'close'):
                _cached_model.close()
            if hasattr(_cached_model, '_ctx'):
                _cached_model._ctx = None
            if hasattr(_cached_model, 'model'):
                _cached_model.model = None
                
            # Drop the reference entirely so GC can reclaim it.
            del _cached_model
            
        _cached_model = None
        _model_cache_key = None
        print("✅ Model forcefully removed")
        
    except Exception as e:
        print(f"❌ Force cleanup error: {e}")
        # Still clear the references even if cleanup raised part-way.
        _cached_model = None
        _model_cache_key = None
    
    # Step 2: several rounds of ComfyUI + Python memory cleanup.
    for i in range(3):
        try:
            unload_all_models()
            soft_empty_cache()
            gc.collect()
            print(f"Cleanup round {i+1} completed")
        except Exception as e:
            print(f"Cleanup round {i+1} error: {e}")
    
    # Step 3: forced CUDA cleanup.
    try:
        import torch
        if torch.cuda.is_available():
            # Repeat to flush the caching allocator's pools.
            for i in range(3):
                torch.cuda.empty_cache()
                torch.cuda.synchronize()
            
            # Force-clear memory and stats on every CUDA device.
            for device_id in range(torch.cuda.device_count()):
                try:
                    with torch.cuda.device(device_id):
                        torch.cuda.empty_cache()
                        torch.cuda.reset_peak_memory_stats(device_id)
                        torch.cuda.reset_accumulated_memory_stats(device_id)
                except Exception:
                    # Best effort per device; was a bare except, which would
                    # also have swallowed KeyboardInterrupt/SystemExit.
                    pass
            
            print("✅ CUDA force cleanup completed")
    except Exception as e:
        print(f"❌ CUDA force cleanup error: {e}")
    
    print("=== FORCE CLEANUP COMPLETED ===")
    print("All references cleared, VRAM should be freed")
    print("=" * 40)

def _validate_cached_model(model):
    """キャッシュされたモデルの有効性をチェック"""
    if model is None:
        return False
    
    try:
        # llama-cppモデルの基本的な属性の存在確認
        if not hasattr(model, 'create_completion') and not hasattr(model, '__call__'):
            print("Model validation failed: Missing required methods")
            return False
        
        # より厳密なテスト - 実際に短いテキストを生成
        try:
            test_response = model.create_completion(
                prompt="Hi",
                max_tokens=2,
                temperature=0.0,
                stream=False,
                echo=False
            )
            
            # レスポンスの構造をチェック
            if isinstance(test_response, dict) and "choices" in test_response:
                choices = test_response.get("choices", [])
                if len(choices) > 0 and "text" in choices[0]:
                    print("Model validation passed: create_completion works")
                    return True
            
            print("Model validation failed: Invalid response structure")
            return False
            
        except Exception as e:
            print(f"Model validation failed with create_completion: {e}")
            # create_completionが失敗した場合、__call__でテスト
            try:
                test_response = model("Hi", max_tokens=2, temperature=0.0)
                if test_response:
                    print("Model validation passed: __call__ works")
                    return True
            except Exception as e2:
                print(f"Model validation failed with __call__: {e2}")
            
            return False
            
    except Exception as e:
        print(f"Model validation failed: {e}")
        return False

class LlamaCppLoader:
    """ComfyUI node: load a GGUF model from models/llm with caching support."""

    @classmethod
    def INPUT_TYPES(cls):
        llm_root = Path(models_dir) / "llm"
        
        # Make sure the directory exists (creates it plus guide files if not).
        _ensure_llm_directory_exists()
        
        model_files = sorted(str(p) for p in llm_root.rglob("*.gguf"))
        
        # When no .gguf file is present, expose friendly placeholder entries
        # so the dropdown shows instructions instead of being empty.
        # (The original if/else both set default = model_files[0]; collapsed.)
        if not model_files:
            model_files = [
                "❌ モデルファイル(.gguf)が見つかりません",
                "📁 models/llm フォルダを確認してください", 
                "🔗 詳細は「📋_モデル配置ガイド.txt」を参照"
            ]
        
        return {
            "required": {
                "model_path": (model_files, {"default": model_files[0]}),
                "n_ctx": ("INT", {"default": 2048, "min": 1, "max": 65536, "step": 256}),
                "n_gpu_layers": ("INT", {"default": 40, "min": 0, "max": 128}),
                "force_reload": ("BOOLEAN", {"default": False}),
                "use_cache": ("BOOLEAN", {"default": True}),  # allow bypassing the model cache
            }
        }

    RETURN_TYPES = ("LLAMA_MODEL",)
    FUNCTION = "load"
    RETURN_NAMES = ("model",)

    def load(self, model_path, n_ctx, n_gpu_layers, force_reload, use_cache):
        """Load (or fetch from cache) the selected GGUF model.

        Returns:
            tuple: 1-tuple with the model, or (None,) on any failure.
        """
        try:
            # Friendly error when the dropdown still shows the placeholder.
            if model_path.startswith("❌"):
                error_msg = """
🚨 モデルファイルが見つかりません！

📁 models/llm フォルダに .gguf ファイルを配置してください。

📋 詳細手順:
1. models/llm/📋_モデル配置ガイド.txt を確認
2. Hugging Faceから推奨モデルをダウンロード
3. .ggufファイルを models/llm/ に配置
4. ComfyUIを再起動

🔗 推奨モデル例:
• Qwen2.5-7B-Instruct-Q8_0.gguf (日本語対応)
• Meta-Llama-3.1-8B-Instruct-Q8_0.gguf (英語)
"""
                print(error_msg)
                raise Exception("モデルファイルが見つかりません。models/llm フォルダを確認してください。")
            
            # Disabling the cache is implemented as a forced reload.
            if not use_cache:
                force_reload = True
                print("Cache disabled, forcing model reload")
            
            llm = _get_or_create_model(model_path, n_ctx, n_gpu_layers, force_reload)
            return (llm,)
        except Exception as e:
            print(f"Loader error: {e}")
            return (None,)

class LlamaCppGenerate:
    """ComfyUI node: generate text (a Stable Diffusion prompt) with a llama model."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "model": ("LLAMA_MODEL",),
                "text1": ("STRING", {"default": "You are a prompt generator for Stable Diffusion.\n\nTask:\nConvert the input description into ONE optimized English prompt for Stable Diffusion.\nRules:\n- Output format: single line of comma-separated phrases only\n- Stay faithful to the original description's content and intent\n- Include: subject, setting/environment, lighting, mood/atmosphere, artistic style, quality tags\n- Use descriptive English terms that accurately represent the input\n- Do not censor or sanitize the content - convert faithfully\n- End cleanly after final quality tag\n\nOutput:\n<prompt>", "multiline": True}),
                "text2": ("STRING", {"default": "プールサイドで寝そべっている女性", "multiline": True}),
                "max_tokens": ("INT", {"default": 128, "min": 1, "max": 4096}),
                "temperature": ("FLOAT", {"default": 0.8, "min": 0.0, "max": 1.0}),
                "top_k": ("INT", {"default": 40, "min": 0, "max": 100}),
                "top_p": ("FLOAT", {"default": 0.95, "min": 0.0, "max": 1.0}),
                "repeat_penalty": ("FLOAT", {"default": 1.1, "min": 0.0, "max": 2.0}),
                "use_random_seed": ("BOOLEAN", {"default": True}),
                "seed": ("INT", {"default": 0, "min": 0}),
                "nsfw_filter": ("BOOLEAN", {"default": True}),
                "auto_reload": ("BOOLEAN", {"default": True}),
            }
        }

    RETURN_TYPES = ("STRING", "LLAMA_MODEL")
    FUNCTION = "generate"
    RETURN_NAMES = ("text", "model")

    def _try_auto_reload(self):
        """Attempt an automatic model reload using the last loader settings.

        Raises:
            Exception: when no previous settings exist or reloading fails.
        """
        global _last_loader_settings, _cached_model
        
        if not _last_loader_settings:
            raise Exception("前回のモデル設定が見つかりません。最初にLoaderノードを実行してください。")
        
        print("モデルが無効なため、自動的に再読み込みしています...")
        print(f"設定: {_last_loader_settings}")
        
        try:
            llm = _get_or_create_model(
                _last_loader_settings["model_path"],
                _last_loader_settings["n_ctx"],
                _last_loader_settings["n_gpu_layers"],
                force_reload=True  # auto-reload always forces a fresh load
            )
            return llm
        except Exception as e:
            print(f"自動再読み込み失敗: {e}")
            raise e

    def _validate_model(self, model):
        """Check model validity (delegates to the module-level validator)."""
        return _validate_cached_model(model)

    def generate(self, model, text1, text2, max_tokens, temperature, top_k, top_p, repeat_penalty, use_random_seed, seed, nsfw_filter, auto_reload):
        """Generate text from the model.

        text1 carries the instruction, text2 the content to convert; on a
        broken model this optionally auto-reloads, and inference falls back
        through create_completion -> __call__ -> tokenize/generate/detokenize.

        Returns:
            tuple: (generated or error text, model).
        """
        try:
            # Validate the model strictly before using it.
            print(f"Checking model validity. Model type: {type(model)}")
            
            if not self._validate_model(model):
                print("Model validation failed in generate")
                
                if auto_reload:
                    try:
                        print("Attempting auto-reload...")
                        model = self._try_auto_reload()
                        
                        # Re-validate after the reload as well.
                        if not self._validate_model(model):
                            raise Exception("自動再読み込み後もモデルが無効です")
                        else:
                            print("Auto-reload successful, model validated")
                            
                    except Exception as e:
                        error_message = f"モデルの自動再読み込みに失敗しました: {str(e)}"
                        print(error_message)
                        return (error_message, None)
                else:
                    error_message = "モデルが無効です。Loaderノードでforce_reload=Trueにして再実行してください。"
                    print(error_message)
                    return (error_message, None)
            else:
                print("Model validation passed")

            # Normalize and clamp all sampling parameters to safe ranges.
            max_tokens = max(1, min(max_tokens, 4096))
            temperature = max(0.0, min(temperature, 1.0))
            top_p = max(0.0, min(top_p, 1.0))
            top_k = max(0, min(top_k, 100))
            repeat_penalty = max(0.0, min(repeat_penalty, 2.0))

            # Pick a random seed when requested.
            if use_random_seed:
                seed = random.randint(1, 2**31 - 1)

            # Build the prompt (text1 = instruction, text2 = content, kept separate).
            instruction = text1 if text1 else ""
            content = text2 if text2 else "A beautiful scene"
            
            # Default instruction when the user supplied none.
            if not instruction.strip():
                instruction = """You are a prompt generator for Stable Diffusion.

Task:
Convert the input description into ONE optimized English prompt for Stable Diffusion.
Rules:
- Output format: single line of comma-separated phrases only
- Stay faithful to the original description's content and intent
- Include: subject, setting/environment, lighting, mood/atmosphere, artistic style, quality tags
- Use descriptive English terms that accurately represent the input
- Do not censor or sanitize the content - convert faithfully
- End cleanly after final quality tag

Output:
<prompt>"""
            
            if not isinstance(instruction, str):
                instruction = str(instruction)
            if not isinstance(content, str):
                content = str(content)

            # Candidate phrases for mild random variation in the prompt.
            # NOTE(review): variation_prompts is currently never used below.
            variation_prompts = [
                "Focus on artistic composition and visual details.",
                "Emphasize mood and atmospheric elements.", 
                "Include creative styling and artistic elements.",
                "Add detailed lighting and visual effects."
            ]
            
            quality_tags = [
                "masterpiece, best quality, highly detailed",
                "high quality, detailed artwork", 
                "ultra detailed, premium quality",
                "best quality, intricate details"
            ]
            
            # Fixed internal system prompt.
            system_prompt = "You are a creative writing assistant. Respond in English."
            
            # NSFW filter handling.
            if nsfw_filter:
                # nsfw_filter=True: instruct the model to refuse harmful content.
                safety_note = " IMPORTANT: If inappropriate content, respond with 'I cannot generate prompts for inappropriate content.' Otherwise, respond ONLY in English."
                wrapped_prompt = f"""{system_prompt}

{instruction}{safety_note}

Input to convert: {content}

Response (English only):"""
            else:
                # nsfw_filter=False: allow free expression.
                # Only a minimal amount of randomness is added.
                if random.choice([True, False]):
                    quality_hint = random.choice(quality_tags)
                    wrapped_prompt = f"""{system_prompt}

{instruction}

Input to convert: {content}

Hint: Consider quality tags like "{quality_hint}"

Response (English only):"""
                else:
                    wrapped_prompt = f"""{system_prompt}

{instruction}

Input to convert: {content}

Response (English only):"""

            print(f"Starting text generation with prompt length: {len(wrapped_prompt)}")

            # Text generation, with two fallback inference paths.
            try:
                response = model.create_completion(
                    prompt=wrapped_prompt,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    top_k=top_k,
                    top_p=top_p,
                    repeat_penalty=repeat_penalty,
                    seed=seed,
                    stream=False,
                    echo=False,
                    stop=["</s>"]
                )
                
                if isinstance(response, dict) and "choices" in response:
                    text = response["choices"][0]["text"]
                else:
                    text = str(response)
                    
            except Exception as e:
                print(f"create_completion failed, trying __call__: {e}")
                try:
                    # Fallback 1: the callable interface with the same params.
                    response = model(
                        wrapped_prompt,
                        max_tokens=max_tokens,
                        temperature=temperature,
                        top_k=top_k,
                        top_p=top_p,
                        repeat_penalty=repeat_penalty,
                        seed=seed,
                        stream=False,
                        echo=False,
                        stop=["</s>"]
                    )
                    
                    if isinstance(response, dict) and "choices" in response:
                        text = response["choices"][0]["text"]
                    else:
                        text = str(response)
                        
                except Exception as e2:
                    print(f"__call__ also failed: {e2}")
                    try:
                        # Fallback 2: raw tokenize / generate / detokenize loop.
                        tokens = model.tokenize(wrapped_prompt.encode('utf-8'))
                        response_tokens = []
                        
                        for token in model.generate(tokens, top_k=top_k, top_p=top_p, temp=temperature):
                            response_tokens.append(token)
                            if len(response_tokens) >= max_tokens:
                                break
                                
                        text = model.detokenize(response_tokens).decode('utf-8', errors='ignore')
                        
                    except Exception as e3:
                        print(f"All methods failed: {e3}")
                        # All paths failed: treat the model as corrupted and clear the cache.
                        print("Model appears to be corrupted, clearing cache")
                        _clear_cached_model()
                        text = f"エラーが発生しました。Loaderノードでforce_reload=Trueにして再実行してください: {str(e3)}"

            # Clean up: strip whitespace and truncate at any stop token.
            text = text.strip()
            for stop_token in ["</s>", "<eos>"]:
                if stop_token in text:
                    text = text.split(stop_token)[0]

            if not text or text.strip() == "":
                text = "レスポンスが生成されませんでした。"

            print(f"Generated text: {text[:100]}...")
            
            return (text, model)
            
        except Exception as e:
            print(f"Generate error: {e}")
            # On any unexpected error, clear the cache so the next run reloads.
            _clear_cached_model()
            error_message = f"エラーが発生しました。Loaderノードでforce_reload=Trueにして再実行してください: {str(e)}"
            return (error_message, model if 'model' in locals() else None)

class LlamaCppUnload:
    """ComfyUI node: release a model's VRAM, optionally preserving the cache."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "model": ("LLAMA_MODEL",),
                "force_gc": ("BOOLEAN", {"default": True}),
                "clear_cuda_cache": ("BOOLEAN", {"default": True}),
                "preserve_cache": ("BOOLEAN", {"default": True}),  # keep the module cache alive
            },
            "optional": {
                "trigger": ("STRING", {"default": ""}),
            }
        }

    RETURN_TYPES = ("STRING", "LLAMA_MODEL")
    FUNCTION = "unload_model"
    RETURN_NAMES = ("status", "model")
    OUTPUT_NODE = True

    def unload_model(self, model, force_gc, clear_cuda_cache, preserve_cache, trigger=""):
        """Unload the given model and free GPU/CPU memory.

        Args:
            model: The LLAMA_MODEL to release (may be None).
            force_gc: Run several gc.collect() passes when True.
            clear_cuda_cache: Empty the CUDA allocator caches when True.
            preserve_cache: Keep the module-level model cache when True.
            trigger: Unused; exists only to force execution ordering in graphs.

        Returns:
            tuple: (multi-line status string, None) — the model output is
            always None so downstream references are released.
        """
        global _cached_model, _model_cache_key
        
        try:
            status_messages = []
            
            if model is None:
                status_messages.append("No model to unload")
                return ("\n".join(status_messages), None)
            
            print(f"Starting model unload process. Model type: {type(model)}")
            
            # 1. Llama-specific cleanup on the model instance itself.
            try:
                if hasattr(model, 'close'):
                    model.close()
                    status_messages.append("Model.close() executed")
                
                # Force-drop the native CUDA context reference.
                if hasattr(model, '_ctx') and model._ctx is not None:
                    try:
                        del model._ctx
                        status_messages.append("Model context cleared")
                    except Exception:
                        # Best effort; was a bare except, which would also
                        # have swallowed KeyboardInterrupt/SystemExit.
                        pass
                        
            except Exception as e:
                status_messages.append(f"Model cleanup error: {str(e)}")
            
            # 2. Module-level cache handling.
            if not preserve_cache:
                _clear_cached_model()
                status_messages.append("Model cache cleared")
            else:
                status_messages.append("Model cache preserved for reuse")
            
            # 3. ComfyUI memory management.
            try:
                unload_all_models()
                soft_empty_cache()
                status_messages.append("ComfyUI memory management executed")
            except Exception as e:
                status_messages.append(f"ComfyUI cleanup error: {str(e)}")
            
            # 4. CUDA cache clearing (optional).
            if clear_cuda_cache:
                try:
                    import torch
                    if torch.cuda.is_available():
                        torch.cuda.empty_cache()
                        torch.cuda.synchronize()
                        status_messages.append("CUDA cache cleared")
                    else:
                        status_messages.append("CUDA not available")
                except ImportError:
                    status_messages.append("PyTorch not available for CUDA cleanup")
                except Exception as e:
                    status_messages.append(f"CUDA cleanup error: {str(e)}")
            
            # 5. Garbage collection (optional, several passes).
            if force_gc:
                total_collected = 0
                for i in range(3):  # multiple GC cycles to break reference chains
                    collected = gc.collect()
                    total_collected += collected
                
                status_messages.append(f"Total GC: {total_collected} objects collected")
            
            # 6. Additional forced CUDA memory release on every device.
            try:
                import torch
                if torch.cuda.is_available():
                    for i in range(torch.cuda.device_count()):
                        with torch.cuda.device(i):
                            torch.cuda.empty_cache()
                    torch.cuda.synchronize()
                    status_messages.append("All CUDA devices memory cleared")
            except Exception:
                # Best effort; narrowed from a bare except.
                pass
            
            final_status = f"VRAM release completed ({'cache preserved' if preserve_cache else 'cache cleared'})"
            status_messages.append(final_status)
            
            result = "\n".join(status_messages)
            print("=== Unload Status ===")
            print(result)
            print("====================")
            
            return (result, None)  # return None for the model to ensure release
            
        except Exception as e:
            error_message = f"Error during VRAM release: {str(e)}"
            print(error_message)
            return (error_message, None)

class LlamaCppMemoryInfo:
    """Diagnostic node that reports model-cache, GPU, system-RAM and GC state.

    Produces a single newline-separated STRING so the report can be wired
    into any text display node. Read-only: nothing is unloaded or freed.
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Declare node inputs; ``trigger`` exists only to force execution order."""
        return {
            "required": {},
            "optional": {
                "trigger": ("STRING", {"default": ""}),
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "get_memory_info"
    RETURN_NAMES = ("memory_info",)

    def get_memory_info(self, trigger=""):
        """Collect a memory report, print it, and return it as a 1-tuple.

        Returns:
            tuple[str]: the multi-line report, or an error message on failure.
        """
        global _cached_model, _model_cache_key, _last_loader_settings

        try:
            memory_info = []

            # Model-cache status.
            if _cached_model is not None:
                memory_info.append(f"Model Cache: Active ({_model_cache_key})")
                # Also check whether the cached model is still usable.
                if _validate_cached_model(_cached_model):
                    memory_info.append("Cached Model Status: Valid")
                else:
                    memory_info.append("Cached Model Status: Invalid (needs reload)")
            else:
                memory_info.append("Model Cache: Empty")

            if _last_loader_settings:
                memory_info.append(f"Last Settings: {_last_loader_settings}")
            else:
                memory_info.append("Last Settings: None")

            # CUDA memory per device, in GiB (reserved includes the allocator cache).
            try:
                import torch
                if torch.cuda.is_available():
                    for i in range(torch.cuda.device_count()):
                        allocated = torch.cuda.memory_allocated(i) / 1024**3
                        reserved = torch.cuda.memory_reserved(i) / 1024**3
                        total = torch.cuda.get_device_properties(i).total_memory / 1024**3
                        free = total - reserved
                        memory_info.append(f"GPU {i}: {allocated:.2f}GB allocated, {reserved:.2f}GB reserved, {free:.2f}GB free, {total:.2f}GB total")
                else:
                    memory_info.append("CUDA not available")
            except ImportError:
                memory_info.append("PyTorch not available")

            # System RAM via psutil when installed.
            try:
                import psutil
                memory = psutil.virtual_memory()
                memory_info.append(f"System RAM: {memory.used/1024**3:.2f}GB used, {memory.available/1024**3:.2f}GB available, {memory.total/1024**3:.2f}GB total ({memory.percent:.1f}%)")
            except ImportError:
                memory_info.append("psutil not available for system memory info")

            # Garbage-collector statistics (best effort, never fatal).
            try:
                gc_stats = gc.get_stats()
                # Sum of collection runs across all GC generations.
                total_collections = sum(stat['collections'] for stat in gc_stats)
                memory_info.append(f"GC: {total_collections} total collections, {len(gc.get_objects())} tracked objects")
            except Exception:  # narrowed from a bare except: keep report best-effort
                pass

            result = "\n".join(memory_info)
            print("Memory Info:")
            print(result)
            return (result,)

        except Exception as e:
            error_message = f"Error getting memory info: {str(e)}"
            print(error_message)
            return (error_message,)

class LlamaCppSafeUnload:
    """Conservative cleanup node: frees caches but keeps the model loaded.

    Unlike the complete-unload node, the model object is passed through
    unchanged so downstream nodes can keep using it.
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Declare node inputs; ``trigger`` exists only to force execution order."""
        return {
            "required": {
                "model": ("LLAMA_MODEL",),
            },
            "optional": {
                "trigger": ("STRING", {"default": ""}),
            }
        }

    RETURN_TYPES = ("STRING", "LLAMA_MODEL")
    FUNCTION = "safe_unload"
    RETURN_NAMES = ("status", "model")
    OUTPUT_NODE = True

    def safe_unload(self, model, trigger=""):
        """Run a minimal memory cleanup and return ``(status, model)``.

        The model is never closed or deleted here; only ComfyUI's soft
        cache, PyTorch's CUDA cache and the garbage collector are touched.
        """
        try:
            status_messages = []

            if model is None:
                status_messages.append("No model to process")
                return ("\n".join(status_messages), None)

            # Minimal memory management only -- the model stays resident.
            try:
                # ComfyUI's standard soft cache release.
                soft_empty_cache()
                status_messages.append("ComfyUI soft cache cleared")

                # Clear only PyTorch's CUDA cache; skip quietly when torch is
                # missing or CUDA errors out (best-effort cleanup).
                try:
                    import torch
                    if torch.cuda.is_available():
                        torch.cuda.empty_cache()
                        status_messages.append("CUDA cache cleared")
                except Exception:  # narrowed from a bare except
                    status_messages.append("CUDA cache clear skipped")

                # Single light garbage-collection pass.
                collected = gc.collect()
                if collected > 0:
                    status_messages.append(f"GC: {collected} objects collected")

                status_messages.append("Safe memory cleanup completed")
                status_messages.append("Model preserved for continued use")

            except Exception as e:
                status_messages.append(f"Safe cleanup error: {str(e)}")
                print(f"Safe cleanup error: {str(e)}")

            result = "\n".join(status_messages)
            print("=== Safe Unload Status ===")
            print(result)
            print("=========================")

            return (result, model)

        except Exception as e:
            error_message = f"Error during safe cleanup: {str(e)}"
            print(error_message)
            return (error_message, model if 'model' in locals() else None)

class LlamaCppCompleteUnload:
    """Aggressive unload node: closes/deletes the model and frees VRAM.

    Optionally clears the module-level model cache; when preserved, the
    next loader call can transparently re-use the cached model.
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Declare node inputs; ``trigger`` exists only to force execution order."""
        return {
            "required": {
                "model": ("LLAMA_MODEL",),
                "force_gc": ("BOOLEAN", {"default": True}),
                "clear_cuda_cache": ("BOOLEAN", {"default": True}),
                # Controls whether the module-level model cache is also dropped.
                "clear_cache": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "trigger": ("STRING", {"default": ""}),
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "complete_unload"
    RETURN_NAMES = ("status",)
    OUTPUT_NODE = True

    def complete_unload(self, model, force_gc, clear_cuda_cache, clear_cache, trigger=""):
        """Close and delete ``model``, then run the requested cleanup steps.

        Returns:
            tuple[str]: newline-joined status log of every step performed.
        """
        global _cached_model, _model_cache_key

        try:
            status_messages = []

            if model is None:
                status_messages.append("No model to unload")
                return ("\n".join(status_messages),)

            print(f"Starting complete model unload. Model type: {type(model)}")

            # Tear down the model object itself (best effort).
            try:
                if hasattr(model, 'close'):
                    model.close()
                    status_messages.append("Model.close() executed")

                # Drop this frame's reference so GC can reclaim the model.
                del model
                status_messages.append("Model object deleted")

            except Exception as e:
                status_messages.append(f"Model cleanup error: {str(e)}")
                print(f"Model cleanup error: {str(e)}")

            # Module-level cache handling.
            if clear_cache:
                _clear_cached_model()
                status_messages.append("Model cache completely cleared")
            else:
                status_messages.append("Model cache preserved for auto-reload")

            # ComfyUI-side memory management.
            unload_all_models()
            soft_empty_cache()
            status_messages.append("ComfyUI memory management executed")

            if force_gc:
                # Several GC passes help break reference cycles promptly.
                total_collected = 0
                for i in range(3):
                    collected = gc.collect()
                    total_collected += collected
                status_messages.append(f"Total GC: {total_collected} objects collected")

            if clear_cuda_cache:
                try:
                    import torch
                    if torch.cuda.is_available():
                        torch.cuda.empty_cache()
                        torch.cuda.synchronize()
                        status_messages.append("CUDA cache cleared")
                except Exception:  # narrowed from a bare except; stays best-effort
                    pass

            result = "\n".join(status_messages)
            print("=== Complete Unload Status ===")
            print(result)
            print("==============================")

            return (result,)

        except Exception as e:
            error_message = f"Error during complete unload: {str(e)}"
            print(error_message)
            return (error_message,)

class LlamaCppAIO:
    """All-In-One LlamaCpp node with JavaScript UI (Loader+Generate+Unload).

    A single node that loads a GGUF model, generates one completion and
    (optionally) releases VRAM afterwards, driven by Generate/Stop flags
    that the companion JavaScript UI toggles.
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Build the input schema, scanning ``models/llm`` for .gguf files."""
        llm_root = Path(models_dir) / "llm"

        # Make sure the model directory exists (create it when missing).
        _ensure_llm_directory_exists()

        model_files = sorted(str(p) for p in llm_root.rglob("*.gguf"))

        # When no model files are found, show helpful placeholder entries
        # instead of an empty dropdown.
        if not model_files:
            model_files = [
                "❌ モデルファイル(.gguf)が見つかりません",
                "📁 models/llm フォルダを確認してください",
                "🔗 詳細は「📋_モデル配置ガイド.txt」を参照"
            ]
        default = model_files[0]

        return {
            "required": {
                # Row 1: input/output sockets (automatic).

                # Row 2: instruction prompt.
                "instruction_prompt": ("STRING", {
                    "default": "You are a prompt generator for Stable Diffusion.\n\nTask:\nConvert the input description into ONE optimized English prompt for Stable Diffusion.\nRules:\n- Output format: single line of comma-separated phrases only\n- Stay faithful to the original description's content and intent\n- Include: subject, setting/environment, lighting, mood/atmosphere, artistic style, quality tags\n- Use descriptive English terms that accurately represent the input\n- Do not censor or sanitize the content - convert faithfully\n- End cleanly after final quality tag\n\nOutput:\n<prompt>", 
                    "multiline": True
                }),
                
                # Row 3: input text to convert.
                "input_text": ("STRING", {
                    "default": "プールサイドで寝そべっている女性", 
                    "multiline": True
                }),
                
                # Row 4: generation-result viewer.
                "generated_result": ("STRING", {
                    "multiline": True, 
                    "default": "", 
                    "placeholder": "生成結果がここに表示されます..."
                }),
                
                # Row 5: [Generate][Stop] buttons (added by JavaScript);
                # these booleans are the internal control flags they set.
                "execute_generate": ("BOOLEAN", {"default": False}),
                "execute_stop": ("BOOLEAN", {"default": False}),
                
                # Row 6+: parameter group.
                # Loader settings.
                "model_path": (model_files, {"default": default}),
                "n_ctx": ("INT", {"default": 2048, "min": 1, "max": 65536, "step": 256}),
                "n_gpu_layers": ("INT", {"default": 40, "min": 0, "max": 128}),
                "force_reload": ("BOOLEAN", {"default": False}),
                
                # Generation parameters.
                "max_tokens": ("INT", {"default": 128, "min": 1, "max": 4096}),
                "temperature": ("FLOAT", {"default": 0.8, "min": 0.0, "max": 1.0}),
                "top_k": ("INT", {"default": 40, "min": 0, "max": 100}),
                "top_p": ("FLOAT", {"default": 0.95, "min": 0.0, "max": 1.0}),
                "repeat_penalty": ("FLOAT", {"default": 1.1, "min": 0.0, "max": 2.0}),
                "use_random_seed": ("BOOLEAN", {"default": True}),
                "seed": ("INT", {"default": 0, "min": 0}),
                "nsfw_filter": ("BOOLEAN", {"default": True}),
                "auto_unload": ("BOOLEAN", {"default": True}),
            }
        }

    RETURN_TYPES = ("STRING", "STRING")
    FUNCTION = "process"
    RETURN_NAMES = ("generated_text", "status")
    CATEGORY = "Llama-CPP"

    def process(self, instruction_prompt, input_text, generated_result, execute_generate, 
                execute_stop, model_path, n_ctx, n_gpu_layers, force_reload, max_tokens, 
                temperature, top_k, top_p, repeat_penalty, use_random_seed, seed, 
                nsfw_filter, auto_unload):
        """Run load -> generate -> (optional) unload in one pass.

        Returns:
            tuple[str, str]: ``(generated_text, status)``. When
            ``execute_generate`` is False the previous ``generated_result``
            is passed through unchanged.
        """
        try:
            status_messages = []

            # User pressed Stop: clean up and echo the previous result.
            if execute_stop:
                print("=== Stop Requested ===")
                status_messages.append("🛑 Generation stopped by user")
                _cleanup_memory()
                return (generated_result, "\n".join(status_messages))

            # Only run when the Generate button set the flag.
            if not execute_generate:
                return (generated_result, "Waiting for generate command...")

            print("=== Starting AIO Generation ===")
            status_messages.append("🚀 Starting generation...")

            # Step 1: load (or reuse) the model.
            print("=== Loading Model ===")
            status_messages.append("📦 Loading model...")

            try:
                llm = _get_or_create_model(model_path, n_ctx, n_gpu_layers, force_reload)
                status_messages.append(f"✅ Model loaded: {os.path.basename(model_path)}")
                print(f"Model loaded successfully: {model_path}")
            except Exception as e:
                error_msg = f"Model loading failed: {str(e)}"
                status_messages.append(f"❌ {error_msg}")
                print(f"Model loading error: {e}")
                return (generated_result, "\n".join(status_messages))

            # Step 2: generate text.
            print("=== Generating Text ===")
            status_messages.append("🎯 Generating text...")

            try:
                # Clamp parameters to the ranges declared in INPUT_TYPES.
                max_tokens = max(1, min(max_tokens, 4096))
                temperature = max(0.0, min(temperature, 1.0))
                top_p = max(0.0, min(top_p, 1.0))
                top_k = max(0, min(top_k, 100))
                repeat_penalty = max(0.0, min(repeat_penalty, 2.0))

                # Pick a fresh random seed when requested.
                if use_random_seed:
                    seed = random.randint(1, 2**31 - 1)

                # Build the prompt, keeping instruction and content separate.
                instruction = instruction_prompt if instruction_prompt else ""
                content = input_text if input_text else "A beautiful scene"

                # Fall back to a default instruction when the user gave none.
                if not instruction.strip():
                    instruction = "Convert the following description to a Stable Diffusion prompt using English words separated by commas, including artistic styles and quality tags."

                if not isinstance(instruction, str):
                    instruction = str(instruction)
                if not isinstance(content, str):
                    content = str(content)

                # Quality hints, optionally mixed in when the filter is off.
                quality_tags = [
                    "masterpiece, best quality, highly detailed",
                    "high quality, detailed artwork",
                    "ultra detailed, premium quality",
                    "best quality, intricate details"
                ]

                # Fixed internal system prompt.
                system_prompt = "You are an English-only AI assistant. Always respond in English."

                if nsfw_filter:
                    # Filter on: instruct the model to refuse harmful input.
                    safety_note = " IMPORTANT: If inappropriate content, respond with 'I cannot generate prompts for inappropriate content.' Otherwise, respond ONLY in English."
                    wrapped_prompt = f"""{system_prompt}

{instruction}{safety_note}

Input to convert: {content}

Response (English only):"""
                else:
                    # Filter off: allow free expression, adding a random
                    # quality hint roughly half of the time for variation.
                    if random.choice([True, False]):
                        quality_hint = random.choice(quality_tags)
                        wrapped_prompt = f"""{system_prompt}

{instruction}

Input to convert: {content}

Hint: Consider quality tags like "{quality_hint}"

Response (English only):"""
                    else:
                        wrapped_prompt = f"""{system_prompt}

{instruction}

Input to convert: {content}

Response (English only):"""

                print(f"Starting text generation with prompt length: {len(wrapped_prompt)}")

                # Run the completion (blocking, non-streaming).
                response = llm.create_completion(
                    prompt=wrapped_prompt,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    top_k=top_k,
                    top_p=top_p,
                    repeat_penalty=repeat_penalty,
                    seed=seed,
                    stream=False,
                    echo=False,
                    stop=["</s>"]
                )

                if isinstance(response, dict) and "choices" in response:
                    result_text = response["choices"][0]["text"]
                else:
                    result_text = str(response)

                # Strip whitespace and truncate at any leaked stop token.
                result_text = result_text.strip()
                for stop_token in ["</s>", "<eos>"]:
                    if stop_token in result_text:
                        result_text = result_text.split(stop_token)[0]

                if not result_text or result_text.strip() == "":
                    result_text = "レスポンスが生成されませんでした。"

                status_messages.append(f"✅ Generated {len(result_text)} characters")
                print(f"Generated text: {result_text[:100]}...")

            except Exception as e:
                error_msg = f"Text generation failed: {str(e)}"
                status_messages.append(f"❌ {error_msg}")
                print(f"Generation error: {e}")
                result_text = f"Error: {error_msg}"

            # Step 3: optionally release VRAM right away.
            if auto_unload:
                print("=== Auto Unloading ===")
                status_messages.append("🧹 Auto unloading VRAM...")
                try:
                    _cleanup_memory()
                    status_messages.append("✅ VRAM cleaned up")
                except Exception as e:
                    status_messages.append(f"⚠️  Cleanup warning: {str(e)}")

            final_status = "\n".join(status_messages)
            print("=== AIO Generation Completed ===")
            print(final_status)

            return (result_text, final_status)

        except Exception as e:
            error_message = f"AIO Process Error: {str(e)}"
            print(error_message)
            return (generated_result, error_message)


# Registration table read by ComfyUI: node type name -> implementing class.
# NOTE(review): LlamaCppLoader, LlamaCppGenerate and LlamaCppUnload are
# presumably defined earlier in this file (outside this chunk) — confirm.
NODE_CLASS_MAPPINGS = {
    "LlamaCppLoader": LlamaCppLoader,
    "LlamaCppGenerate": LlamaCppGenerate,
    "LlamaCppUnload": LlamaCppUnload,
    "LlamaCppSafeUnload": LlamaCppSafeUnload,
    "LlamaCppMemoryInfo": LlamaCppMemoryInfo,
    "LlamaCppCompleteUnload": LlamaCppCompleteUnload,
    "LlamaCppAIO": LlamaCppAIO,
}

# Human-readable titles shown in ComfyUI's node menu for each registered
# node type above; keys must match NODE_CLASS_MAPPINGS.
NODE_DISPLAY_NAME_MAPPINGS = {
    "LlamaCppLoader": "Llama-CPP Loader",
    "LlamaCppGenerate": "Llama-CPP Generate",
    "LlamaCppUnload": "Llama-CPP Unload VRAM",
    "LlamaCppSafeUnload": "Llama-CPP Safe Unload",
    "LlamaCppMemoryInfo": "Llama-CPP Memory Info",
    "LlamaCppCompleteUnload": "Llama-CPP Complete Unload",
    "LlamaCppAIO": "🚀 Llama-CPP All-In-One",
}