import os
import threading
import time

import torch
import yaml
from loguru import logger
from peft import PeftModel, PeftConfig
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
    GenerationConfig,
    TextIteratorStreamer,
)

from backend.common.config import config_manager
from backend.common.config import config

class ModelManager:
    """Singleton manager for the local LLM.

    Responsibilities: loading the tokenizer and (optionally quantized)
    model, attaching/hot-swapping LoRA adapters, blocking and streaming
    text generation, and lifecycle operations (unload/reload). When an
    external LLM provider (e.g. Ollama) is configured, local model
    loading is skipped entirely.
    """

    _instance = None
    _initialized = False

    def __new__(cls):
        # Classic singleton: one shared instance per process.
        if cls._instance is None:
            cls._instance = super(ModelManager, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        # __new__ always returns the same object, so guard against
        # re-running initialization on subsequent ModelManager() calls.
        if self._initialized:
            return

        # Reuse the project's existing configuration instance.
        self.model_config = config.model.model_dump()
        self.learning_config = {}

        self.tokenizer = None
        self.model = None
        self.lora_model = None
        self.is_model_loaded = False
        self.load_time = 0

        # If an external LLM provider (e.g. Ollama) is active, skip
        # loading the local model entirely.
        try:
            active_provider = config.llm_providers.active_provider
            if active_provider != 'local':
                logger.info(f"使用外部LLM提供商: {active_provider}，跳过本地模型加载")
                # Mark as loaded so API endpoints don't reject requests.
                self.is_model_loaded = True
                self._initialized = True
                return
        except Exception as e:
            logger.warning(f"检查LLM提供商配置失败: {str(e)}，继续加载本地模型")

        # Load the local model.
        self._load_model()

        self._initialized = True

    def _build_quantization_config(self):
        """Build a quantization config from self.model_config, or None.

        Must be called after the tokenizer is loaded (the GPTQ branch
        passes self.tokenizer to GPTQConfig).
        """
        if not self.model_config['quantized']:
            return None

        if self.model_config['quantization_type'] == 'gptq':
            logger.info(f"使用GPTQ {self.model_config['quantization_bits']}bit量化")
            from transformers import GPTQConfig
            return GPTQConfig(
                bits=self.model_config['quantization_bits'],
                disable_exllama=False,
                tokenizer=self.tokenizer
            )

        if self.model_config['quantization_type'] == 'bnb':
            logger.info(f"使用BitsAndBytes {self.model_config['quantization_bits']}bit量化")
            return BitsAndBytesConfig(
                load_in_4bit=self.model_config['quantization_bits'] == 4,
                load_in_8bit=self.model_config['quantization_bits'] == 8,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.float16
            )

        # Unknown quantization_type: fall through to unquantized loading.
        return None

    def _load_model(self):
        """Load the language model and tokenizer per self.model_config.

        Best-effort: on failure, is_model_loaded stays False and callers
        are expected to check it before generating.
        """
        try:
            start_time = time.time()
            logger.info(f"开始加载模型: {self.model_config['name']}")

            # Prefer an explicit local path; fall back to the model name
            # (resolved through the HF hub / cache_dir).
            model_path = self.model_config['model_path'] or self.model_config['name']
            tokenizer_path = self.model_config['tokenizer_path'] or model_path

            logger.info(f"加载分词器: {tokenizer_path}")
            self.tokenizer = AutoTokenizer.from_pretrained(
                tokenizer_path,
                cache_dir=self.model_config['cache_dir'],
                trust_remote_code=True
            )

            # Quantization (GPTQ or BitsAndBytes), if enabled.
            quantization_config = self._build_quantization_config()

            logger.info(f"加载模型: {model_path}")
            self.model = AutoModelForCausalLM.from_pretrained(
                model_path,
                cache_dir=self.model_config['cache_dir'],
                quantization_config=quantization_config,
                device_map=self.model_config['device_map'],
                torch_dtype=torch.float16,
                trust_remote_code=True,
                low_cpu_mem_usage=True
            )

            # Attach a LoRA adapter when one is configured.
            lora_cfg = self.model_config.get('lora', {}) or {}
            if lora_cfg.get('enabled') and lora_cfg.get('adapter_name'):
                self._load_lora_adapter(lora_cfg.get('adapter_name'))

            self.is_model_loaded = True
            self.load_time = time.time() - start_time
            logger.info(f"模型加载完成，耗时: {self.load_time:.2f}秒")
        except Exception as e:
            logger.error(f"模型加载失败: {str(e)}")
            self.is_model_loaded = False
            # Deliberately swallowed: a production setup may want a retry
            # mechanism or alerting here instead.

    def _load_lora_adapter(self, adapter_name):
        """Load a LoRA adapter onto the base model (best-effort).

        Missing adapter directories are logged and skipped; failures are
        logged but never propagated.
        """
        try:
            lora_cfg = self.model_config.get('lora', {}) or {}
            adapter_path = os.path.join(lora_cfg.get('adapter_path', ''), adapter_name)
            logger.info(f"加载LoRA适配器: {adapter_path}")

            # Skip silently-ish when the adapter directory is absent.
            if not os.path.exists(adapter_path):
                logger.warning(f"LoRA适配器不存在: {adapter_path}")
                return

            self.lora_model = PeftModel.from_pretrained(
                self.model,
                adapter_path,
                torch_dtype=torch.float16,
                device_map=self.model_config['device_map']
            )

            # Weights are intentionally NOT merged (merge_and_unload), so
            # the base model stays untouched and adapters can be swapped.

            logger.info(f"LoRA适配器加载完成: {adapter_name}")
        except Exception as e:
            logger.error(f"LoRA适配器加载失败: {str(e)}")

    def generate(self, prompt, max_new_tokens=None, temperature=None, top_p=None, use_streaming=None):
        """Generate text for *prompt*.

        Any parameter left as None falls back to the configured default.
        Returns the decoded text (NOTE: the prompt is included — existing
        behavior), a generator of text chunks when streaming, or an error
        string on failure.
        """
        if not self.is_model_loaded:
            logger.error("模型未加载，无法生成文本")
            return "模型未加载，请稍后再试"

        try:
            # Explicit None checks: the previous "x or default" pattern
            # wrongly discarded legitimate falsy overrides (e.g. 0.0).
            if max_new_tokens is None:
                max_new_tokens = self.model_config['max_new_tokens']
            if temperature is None:
                temperature = self.model_config['temperature']
            if top_p is None:
                top_p = self.model_config['top_p']
            if use_streaming is None:
                use_streaming = self.model_config['use_streaming']

            # Tokenize and move inputs onto the model's device.
            inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)

            generation_config = GenerationConfig(
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_p=top_p,
                do_sample=True,
                eos_token_id=self.tokenizer.eos_token_id,
                pad_token_id=self.tokenizer.pad_token_id,
            )

            # Use the LoRA-wrapped model when an adapter is attached.
            current_model = self.lora_model if self.lora_model is not None else self.model

            if use_streaming:
                return self._generate_stream(current_model, inputs, generation_config)

            start_time = time.time()
            with torch.no_grad():
                outputs = current_model.generate(
                    **inputs,
                    generation_config=generation_config
                )

            # outputs[0] contains the prompt tokens followed by the new
            # tokens, so the decoded text includes the prompt.
            generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

            generation_time = time.time() - start_time
            logger.info(f"文本生成完成，耗时: {generation_time:.2f}秒，生成token数: {len(outputs[0]) - len(inputs['input_ids'][0])}")

            return generated_text
        except Exception as e:
            logger.error(f"文本生成失败: {str(e)}")
            return f"生成失败: {str(e)}"

    def _generate_stream(self, model, inputs, generation_config):
        """Yield generated text chunks as they are produced.

        TextIteratorStreamer must be drained *while* model.generate()
        runs — the original code iterated the return value of generate()
        (a token-id tensor), which only runs after generation completed
        and yielded per-sequence tensors, not streamed text. Generation
        is therefore moved to a background thread and this generator
        consumes the streamer.
        """
        try:
            start_time = time.time()
            streamer = TextIteratorStreamer(
                self.tokenizer, skip_prompt=True, skip_special_tokens=True
            )

            def _worker():
                # no_grad must wrap generate() in the worker thread itself.
                with torch.no_grad():
                    model.generate(
                        **inputs,
                        generation_config=generation_config,
                        streamer=streamer
                    )

            thread = threading.Thread(target=_worker, daemon=True)
            thread.start()

            chunk_count = 0
            for text in streamer:
                chunk_count += 1
                yield text
            thread.join()

            generation_time = time.time() - start_time
            logger.info(f"流式生成完成，耗时: {generation_time:.2f}秒，生成token数: {chunk_count}")
        except Exception as e:
            logger.error(f"流式生成失败: {str(e)}")
            yield f"生成失败: {str(e)}"

    def update_lora_adapter(self, adapter_name):
        """Hot-swap the LoRA adapter to *adapter_name*.

        Returns True on success; on failure restores the previous adapter
        name in the config and returns False.
        """
        logger.info(f"开始热更新LoRA适配器: {adapter_name}")
        # Capture the previous name BEFORE the try block, so the except
        # branch can always restore it (the original code raised NameError
        # when the failure happened before the assignment).
        old_adapter_name = (self.model_config.get('lora') or {}).get('adapter_name')
        try:
            # Ensure the 'lora' sub-dict exists (it may be missing or None).
            if not isinstance(self.model_config.get('lora'), dict):
                self.model_config['lora'] = {}
            self.model_config['lora']['adapter_name'] = adapter_name

            # Reload the adapter onto the base model.
            self._load_lora_adapter(adapter_name)

            logger.info(f"LoRA适配器热更新成功: 从 '{old_adapter_name}' 更新为 '{adapter_name}'")
            return True
        except Exception as e:
            logger.error(f"LoRA适配器热更新失败: {str(e)}")
            # Roll back the config to the previous adapter name.
            self.model_config.setdefault('lora', {})['adapter_name'] = old_adapter_name
            return False

    def get_model_info(self):
        """Return a status dict describing the current model state."""
        lora_cfg = self.model_config.get('lora', {}) or {}
        quantized = bool(self.model_config.get('quantized'))
        return {
            'name': self.model_config.get('name'),
            'is_loaded': self.is_model_loaded,
            'load_time': self.load_time,
            'quantized': quantized,
            'quantization_type': self.model_config.get('quantization_type') if quantized else None,
            'quantization_bits': self.model_config.get('quantization_bits') if quantized else None,
            'lora_enabled': bool(lora_cfg.get('enabled')),
            'lora_adapter_name': lora_cfg.get('adapter_name') if lora_cfg.get('enabled') else None,
            'device': str(self.model.device) if self.is_model_loaded and getattr(self, 'model', None) is not None else None
        }

    def unload_model(self):
        """Unload the model (and adapter) and release GPU memory."""
        if self.is_model_loaded:
            try:
                del self.model
                del self.lora_model
                self.model = None
                self.lora_model = None
                self.is_model_loaded = False

                # Return cached GPU memory to the driver.
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()
                    torch.cuda.ipc_collect()

                logger.info("模型已卸载，内存已释放")
            except Exception as e:
                logger.error(f"模型卸载失败: {str(e)}")

    def reload_model(self):
        """Unload then reload the model; return whether loading succeeded."""
        logger.info("开始重新加载模型")
        self.unload_model()
        self._load_model()
        return self.is_model_loaded

# Streaming generator import. NOTE(review): this import sits after the class
# definition but runs before the instantiation below, so method-time lookups
# of TextIteratorStreamer succeed; consider moving it to the top of the file.
from transformers import TextIteratorStreamer

# Global model manager instance (module-level singleton handle).
model_manager = ModelManager()