from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
)
import torch
import random
import numpy as np
from typing import Dict, Any

class BaseLLMNode:
    """所有 LLM 節點的基礎類別"""
    
    def __init__(self):
        """Initialize empty per-instance caches, keyed by model id."""
        # Loaded models and their tokenizers are kept across calls to avoid
        # repeated (slow) loads of the same checkpoint.
        self.model_cache: Dict[str, Any] = dict()
        self.tokenizer_cache: Dict[str, Any] = dict()
    
    def _load_model(self, model_id: str, force_reload: bool = False):
        """Load a model and tokenizer, caching them for reuse.

        Args:
            model_id: Hugging Face model id or local checkpoint path.
            force_reload: if True, evict any cached copy and load from scratch.

        Returns:
            Tuple of (model, tokenizer) for ``model_id``.
        """
        if force_reload and model_id in self.model_cache:
            print(f"🔄 Force reloading model: {model_id}")
            # Drop the stale copies and reclaim GPU/host memory before reloading.
            try:
                del self.model_cache[model_id]
                del self.tokenizer_cache[model_id]
                import gc
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()
                gc.collect()
            except Exception as e:
                # Best-effort cleanup: log instead of silently swallowing
                # every error (the original bare `except: pass` hid failures).
                print(f"⚠️ Error while evicting cached model: {e}")
        
        if model_id not in self.model_cache:
            print(f"Loading model: {model_id}")
            
            # Load the tokenizer; some repos require trust_remote_code.
            tokenizer = AutoTokenizer.from_pretrained(
                model_id, 
                trust_remote_code=True,
                local_files_only=False
            )
            
            # Ensure a pad token exists (many causal LMs ship without one).
            if tokenizer.pad_token is None:
                if tokenizer.eos_token:
                    tokenizer.pad_token = tokenizer.eos_token
                else:
                    tokenizer.add_special_tokens({'pad_token': '[PAD]'})
            
            # Model load settings: fp16 weights with automatic device placement.
            model_kwargs = {
                "device_map": "auto",
                "torch_dtype": torch.float16,
                "trust_remote_code": True,
                "local_files_only": False
            }
            
            # Pick the attention implementation based on Flash Attention availability.
            try:
                import flash_attn
                print(f"✅ Flash Attention available: {flash_attn.__version__}")
                
                # Phi-4-mini is forced to eager attention for stability.
                if "phi-4" in model_id.lower():
                    print("⚠️  Phi-4-mini: forcing eager attention for stability")
                    model_kwargs["attn_implementation"] = "eager"
                else:
                    print("🚀 Using flash_attention_2 for better performance")
                    model_kwargs["attn_implementation"] = "flash_attention_2"
                    
            except ImportError:
                print("❌ Flash Attention not found, using default attention")
                if "phi-4" in model_id.lower():
                    model_kwargs["attn_implementation"] = "eager"
            
            model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)
            
            # Cache both objects for subsequent calls.
            self.model_cache[model_id] = model
            self.tokenizer_cache[model_id] = tokenizer
            print(f"Model loaded successfully: {model_id}")
        
        return self.model_cache[model_id], self.tokenizer_cache[model_id]
    
    def clear_model_cache(self, selective: bool = True):
        """Smart cache cleanup.

        Args:
            selective: when True, only free the GPU cache and keep loaded
                models; when False, drop all cached models and tokenizers too.
        """
        def _reclaim_memory():
            # Shared GC + CUDA cleanup used by both modes.
            import gc
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            gc.collect()

        if selective:
            # Keep already-loaded models around for efficiency.
            print("🧹 Clearing GPU cache only (keeping models loaded)...")
            try:
                _reclaim_memory()
                print("✅ GPU cache cleared successfully")
            except Exception as e:
                print(f"⚠️ Error clearing GPU cache: {e}")
        else:
            # Full wipe — intended only for error recovery.
            print("🧹 Clearing complete model cache...")
            try:
                self.model_cache.clear()
                self.tokenizer_cache.clear()
                _reclaim_memory()
                print("✅ Complete model cache cleared successfully")
            except Exception as e:
                print(f"⚠️ Error clearing complete cache: {e}")
    
    def _get_model_max_tokens(self, model_name: str) -> int:
        """Return the recommended max output token count for a model name."""
        lowered = model_name.lower()

        # Per-family output-token budgets; first substring match wins.
        limits = (
            ("qwen", 4096),      # Qwen 2.5: 32K context
            ("llama", 8192),     # Llama 3.1: 128K context
            ("mistral", 4096),   # Mistral 7B: 32K context
            ("gemma", 2048),     # Gemma 2: 8K context
            ("phi", 8192),       # Phi 3.5: 128K context
            ("yi", 4096),        # Yi: 32K context
        )

        # Fall back to a conservative default for unknown families.
        return next((cap for family, cap in limits if family in lowered), 2048)

    def _generate(self, model, tokenizer, prompt, max_new_tokens, temperature, seed, 
                  top_k=50, top_p=0.9, repetition_penalty=1.1, length_penalty=1.0, 
                  no_repeat_ngram_size=3, diversity_penalty=0.0, num_beams=1, 
                  do_sample=True, early_stopping=False):
        """Core text-generation logic with enhanced sampling parameters.

        Args:
            model: loaded causal LM exposing ``.device`` and ``.generate``.
            tokenizer: matching tokenizer exposing ``encode``/``decode``
                (and ``__call__`` for the Phi-4 path).
            prompt: input text to continue.
            max_new_tokens: requested output budget, clamped to the per-family
                limit from ``_get_model_max_tokens``.
            temperature: sampling temperature; values <= 0 force greedy decoding.
            seed: RNG seed applied to torch, random, numpy (and CUDA if present).
            top_k, top_p, repetition_penalty, length_penalty,
            no_repeat_ngram_size, num_beams, do_sample, early_stopping:
                standard HF ``generate`` controls, forwarded as given.
            diversity_penalty: accepted for interface compatibility but
                currently unused (it would also require num_beam_groups > 1).

        Returns:
            The newly generated text (prompt excluded), stripped of whitespace.
        """
        # Seed every RNG source for reproducible generation.
        torch.manual_seed(seed)
        random.seed(seed)
        np.random.seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)
        
        # Clamp max_new_tokens to the per-family budget. Fetch the model name
        # once (the previous version redundantly fetched it twice).
        model_name = getattr(tokenizer, 'name_or_path', '')
        model_max_tokens = self._get_model_max_tokens(model_name)
        max_new_tokens = min(max_new_tokens, model_max_tokens)
        
        print(f"Model: {model_name}, Max tokens limit: {model_max_tokens}, Using: {max_new_tokens}")
        
        # Phi-4-mini gets a special tokenization + multi-strategy fallback path.
        is_phi4 = 'phi-4' in model_name.lower()
        
        if is_phi4:
            print("⚠️  Phi-4-mini detected: using special tokenization")
            # Full tokenization with padding/truncation for Phi-4-mini.
            inputs = tokenizer(
                prompt,
                return_tensors="pt",
                add_special_tokens=True,
                padding=True,
                truncation=True,
                max_length=2048
            )
            input_ids = inputs["input_ids"].to(model.device)
            attention_mask = inputs["attention_mask"].to(model.device)
            
            print(f"Input length: {input_ids.shape[1]} tokens")
            
            # Try several generation strategies in order, using the caller's
            # parameters first and falling back to simpler decoding modes.
            strategies = [
                {
                    "do_sample": do_sample if temperature > 0 else False, 
                    "temperature": max(temperature, 0.1), 
                    "top_k": top_k, 
                    "top_p": top_p,
                    "repetition_penalty": repetition_penalty,
                    "no_repeat_ngram_size": no_repeat_ngram_size,
                    "name": "enhanced_sampling"
                },
                {"do_sample": False, "name": "greedy_fallback"},
                {"do_sample": False, "num_beams": max(num_beams, 1), "name": "beam_fallback"},
            ]
            
            for strategy in strategies:
                try:
                    print(f"Trying strategy: {strategy['name']}")
                    gen_kwargs = {
                        "input_ids": input_ids,
                        "attention_mask": attention_mask,
                        "max_new_tokens": max_new_tokens,
                        "pad_token_id": tokenizer.pad_token_id or tokenizer.eos_token_id,
                        "eos_token_id": tokenizer.eos_token_id,
                        "use_cache": False,  # KV cache disabled to work around Phi-4 issues
                    }
                    # 'name' is bookkeeping only, not a generate() kwarg.
                    gen_kwargs.update({k: v for k, v in strategy.items() if k != 'name'})
                    
                    with torch.no_grad():
                        outputs = model.generate(**gen_kwargs)
                    
                    # Decode only the newly generated tokens (prompt excluded).
                    new_tokens = outputs[0][input_ids.shape[1]:]
                    result = tokenizer.decode(new_tokens, skip_special_tokens=True)
                    
                    print(f"✅ Strategy '{strategy['name']}' succeeded!")
                    print(f"Generated length: {len(result)} characters")
                    return result.strip()
                    
                except Exception as e:
                    print(f"❌ Strategy '{strategy['name']}' failed: {e}")
                    continue
            
            # Every strategy failed — surface a readable error to the caller.
            return "Phi-4-mini generation failed with all strategies. Please try a different model."
            
        else:
            # Standard path for all other models.
            inputs = tokenizer.encode(prompt, return_tensors="pt")
            inputs = inputs.to(model.device)
            
            print(f"Input length: {inputs.shape[1]} tokens")
            
            gen_kwargs = {
                "max_new_tokens": max_new_tokens,
                "temperature": temperature,
                "do_sample": do_sample if temperature > 0 else False,
                "top_k": top_k if do_sample else None,
                "top_p": top_p if do_sample else None,
                "repetition_penalty": repetition_penalty,
                "length_penalty": length_penalty,
                "no_repeat_ngram_size": no_repeat_ngram_size,
                "num_beams": num_beams,
                "early_stopping": early_stopping,
                "pad_token_id": tokenizer.pad_token_id or tokenizer.eos_token_id,
                "eos_token_id": tokenizer.eos_token_id,
            }
            
            # Strip parameters explicitly disabled above (set to None).
            gen_kwargs = {k: v for k, v in gen_kwargs.items() if v is not None}
            
            with torch.no_grad():
                outputs = model.generate(inputs, **gen_kwargs)
            
            # Decode only the newly generated tokens (prompt excluded).
            new_tokens = outputs[0][inputs.shape[1]:]
            result = tokenizer.decode(new_tokens, skip_special_tokens=True)
            
            print(f"Generated length: {len(result)} characters")
            return result.strip()
    

    @classmethod
    def _common_input_types(cls, default_model_path: str, default_prompt: str):
        """Shared input-type schema (core parameter set) for LLM nodes."""
        required = {
            "prompt": ("STRING", {"multiline": True, "default": default_prompt}),
            "system_prompt": ("STRING", {"multiline": True, "default": "You are a helpful assistant."}),
            "model_id": ("STRING", {"multiline": False, "default": default_model_path}),
            "max_new_tokens": ("INT", {"default": 256, "min": 1, "max": 4096}),
            "temperature": ("FLOAT", {"default": 0.7, "min": 0.01, "max": 2.0}),
            "seed": ("INT", {"default": 42, "min": 1, "max": 2147483647}),
        }
        optional = {
            # Core sampling parameters
            "top_k": ("INT", {"default": 50, "min": 1, "max": 100}),
            "top_p": ("FLOAT", {"default": 0.9, "min": 0.1, "max": 1.0}),
            "repetition_penalty": ("FLOAT", {"default": 1.1, "min": 1.0, "max": 1.5}),
            "length_penalty": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 2.0}),
            "no_repeat_ngram_size": ("INT", {"default": 3, "min": 0, "max": 10}),
            # Beam-search parameters
            "num_beams": ("INT", {"default": 1, "min": 1, "max": 10}),
            "early_stopping": ("BOOLEAN", {"default": False}),
            "do_sample": ("BOOLEAN", {"default": True}),
        }
        return {"required": required, "optional": optional}
    
    def _handle_error(self, e: Exception) -> tuple:
        """Uniform error handling: log the traceback, return a 1-tuple message."""
        import traceback
        # Full traceback goes to the console for debugging; the short message
        # is what the node returns downstream.
        print(f"Full error traceback:\n{traceback.format_exc()}")
        return (f"Error: {str(e)}",)