import os
import yaml
import json
import uuid
import shutil
import torch
import pandas as pd
from datasets import Dataset, load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    BitsAndBytesConfig,
    DataCollatorForLanguageModeling
)
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from loguru import logger
from backend.database.db import db_manager
from backend.models.model_loader import model_manager

class LearningManager:
    """Singleton that manages the self-learning pipeline.

    Responsibilities:
      * persist user feedback / corrections via ``db_manager``
      * export not-yet-used feedback rows into JSONL training datasets
      * run QLoRA (4-bit quantized LoRA) incremental fine-tuning
      * hot-reload the produced LoRA adapter through ``model_manager``

    Configuration is read once, at first construction, from
    ``configs/config.yaml`` (sections ``learning`` and ``model``) located at
    the project root (three directory levels above this file).
    """

    # Singleton bookkeeping shared by every construction of this class.
    _instance = None
    _initialized = False

    def __new__(cls):
        # Classic singleton: allocate on the first call, reuse thereafter.
        # NOTE(review): not thread-safe; presumably constructed once at import
        # time (see module bottom) so races are unlikely — confirm.
        if cls._instance is None:
            cls._instance = super(LearningManager, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        # __init__ runs on *every* LearningManager() call, even for the shared
        # instance, so guard against repeating the setup below.
        if self._initialized:
            return
        
        # Load configuration from configs/config.yaml at the project root
        # (three dirname() hops up from this file's absolute path).
        config_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'configs', 'config.yaml')
        with open(config_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f)
            self.learning_config = config['learning']
            self.model_config = config['model']
        
        # Working directories, both relative to the project root:
        # data/training holds generated JSONL datasets; models/lora_adapters
        # holds fine-tuned adapter checkpoints.
        self.training_data_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'data', 'training')
        self.output_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'models', 'lora_adapters')
        
        os.makedirs(self.training_data_dir, exist_ok=True)
        os.makedirs(self.output_dir, exist_ok=True)
        
        self._initialized = True
    
    def save_learning_data(self, user_query, assistant_response, corrected_response=None, feedback=None, is_positive=None, metadata=None):
        """Persist one user correction / annotation sample to the database.

        Args:
            user_query: the user's original question.
            assistant_response: the answer the assistant actually produced.
            corrected_response: optional user-corrected answer (preferred over
                ``assistant_response`` when building training data).
            feedback: optional free-form feedback text.
            is_positive: optional flag marking the feedback positive/negative.
            metadata: optional extra context stored alongside the row.

        Returns:
            True on success; False if learning is disabled or the insert fails.
        """
        try:
            # No-op unless the learning feature is enabled in config.
            if not bool(self.learning_config.get('enabled', False)):
                logger.warning("学习功能未启用")
                return False
            
            # Store the sample for later dataset generation.
            db_manager.insert_learning_data(
                user_query=user_query,
                assistant_response=assistant_response,
                corrected_response=corrected_response,
                feedback=feedback,
                is_positive=is_positive,
                metadata=metadata
            )
            
            logger.info("学习数据保存成功")
            return True
        except Exception as e:
            logger.error(f"学习数据保存失败: {str(e)}")
            return False
    
    def generate_training_dataset(self, dataset_name=None, max_samples=None):
        """Build a JSONL training dataset from not-yet-used learning data.

        Each line is ``{"text": "<ChatML-formatted conversation>"}`` (see
        ``_format_conversation``). Rows that are exported get marked as used
        in the database so they are not exported twice.

        Args:
            dataset_name: base name for the output file; a random name is
                generated when omitted.
            max_samples: cap on exported rows; defaults to
                ``learning.max_training_samples`` from config.

        Returns:
            Path to the written ``.jsonl`` file, or None when learning is
            disabled, no data is available, or an error occurs.
        """
        try:
            # No-op unless the learning feature is enabled in config.
            if not bool(self.learning_config.get('enabled', False)):
                logger.warning("学习功能未启用")
                return None
            
            # Generate a unique dataset name when none was supplied.
            if not dataset_name:
                dataset_name = f"training_data_{uuid.uuid4().hex[:8]}"
            
            # Fetch learning rows that have not yet been used for training.
            max_samples = max_samples or self.learning_config['max_training_samples']
            learning_data = db_manager.get_unused_learning_data(limit=max_samples)
            
            if not learning_data:
                logger.warning("没有未用于训练的学习数据")
                return None
            
            logger.info(f"获取到 {len(learning_data)} 条未用于训练的学习数据")
            
            # Write the samples out as one JSON object per line.
            jsonl_path = os.path.join(self.training_data_dir, f"{dataset_name}.jsonl")
            
            with open(jsonl_path, 'w', encoding='utf-8') as f:
                for item in learning_data:
                    # Build the training conversation for this row.
                    if item['corrected_response']:
                        # Prefer the user-corrected response as the target.
                        conversation = [
                            {"role": "user", "content": item['user_query']},
                            {"role": "assistant", "content": item['corrected_response']}
                        ]
                    else:
                        # Fall back to the original response when the user
                        # supplied no correction.
                        conversation = [
                            {"role": "user", "content": item['user_query']},
                            {"role": "assistant", "content": item['assistant_response']}
                        ]
                    
                    # Convert to the flat-text format the trainer consumes.
                    formatted_data = self._format_conversation(conversation)
                    
                    # One JSON object per line (non-ASCII kept readable).
                    f.write(json.dumps(formatted_data, ensure_ascii=False) + '\n')
            
            # Mark the exported rows as used so they are never exported again.
            # NOTE(review): this happens before training runs — if the later
            # fine-tune fails, these rows are still consumed; confirm intended.
            data_ids = [item['id'] for item in learning_data]
            db_manager.mark_learning_data_as_used(data_ids)
            
            logger.info(f"训练数据集生成成功: {jsonl_path}, 包含 {len(learning_data)} 条样本")
            return jsonl_path
        except Exception as e:
            logger.error(f"训练数据集生成失败: {str(e)}")
            return None
    
    def _format_conversation(self, conversation):
        """Serialize a conversation into ChatML text for training.

        The format targets Qwen2.5-style models:
            <|im_start|>user
            <user input><|im_end|>
            <|im_start|>assistant
            <assistant reply><|im_end|>

        Args:
            conversation: list of ``{"role": ..., "content": ...}`` dicts;
                roles other than "user"/"assistant" are silently skipped.

        Returns:
            ``{"text": <formatted string>}`` suitable for one JSONL line.
        """
        
        formatted_text = ""
        
        for message in conversation:
            if message['role'] == 'user':
                formatted_text += f"<|im_start|>user\n{message['content']}<|im_end|>\n"
            elif message['role'] == 'assistant':
                formatted_text += f"<|im_start|>assistant\n{message['content']}<|im_end|>\n"
        
        # Drop the trailing newline left by the last appended segment.
        formatted_text = formatted_text.rstrip('\n')
        
        return {'text': formatted_text}
    
    def fine_tune_with_qlora(self, dataset_path, adapter_name=None):
        """Run a QLoRA incremental fine-tune on a JSONL dataset.

        Loads the base model in 4-bit NF4 quantization, wraps it with a LoRA
        adapter configured from ``learning.qlora_config``, trains with
        hyperparameters from ``learning.training_args``, and saves the adapter
        plus tokenizer under ``self.output_dir/<adapter_name>``.

        Args:
            dataset_path: path to a JSONL file with a ``text`` field per line
                (as produced by ``generate_training_dataset``).
            adapter_name: output adapter directory name; randomly generated
                when omitted.

        Returns:
            The adapter name on success, or None on any failure (learning
            disabled, missing dataset, or a training error).
        """
        try:
            # No-op unless the learning feature is enabled in config.
            if not bool(self.learning_config.get('enabled', False)):
                logger.warning("学习功能未启用")
                return None
            
            # Refuse to start without the dataset file.
            if not os.path.exists(dataset_path):
                logger.error(f"数据集文件不存在: {dataset_path}")
                return None
            
            # Generate a unique adapter name when none was supplied.
            if not adapter_name:
                adapter_name = f"adapter_{uuid.uuid4().hex[:8]}"
            
            adapter_output_path = os.path.join(self.output_dir, adapter_name)
            
            logger.info(f"开始QLoRA微调，适配器名称: {adapter_name}, 输出路径: {adapter_output_path}")
            
            # Load the JSONL training data (exposed under the 'train' split).
            dataset = load_dataset('json', data_files=dataset_path)
            
            # Resolve the base model: prefer the local path, fall back to the
            # configured model name.
            model_name_or_path = self.model_config['model_path'] or self.model_config['name']
            
            # 4-bit NF4 quantization with double quantization — the "Q" in
            # QLoRA; compute runs in fp16.
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.float16
            )
            
            # Load the quantized base model, sharded automatically across
            # available devices.
            model = AutoModelForCausalLM.from_pretrained(
                model_name_or_path,
                quantization_config=bnb_config,
                device_map="auto",
                trust_remote_code=True,
                low_cpu_mem_usage=True
            )
            
            # Load the tokenizer; right padding is the usual choice for
            # causal-LM training.
            tokenizer = AutoTokenizer.from_pretrained(
                model_name_or_path,
                trust_remote_code=True,
                padding_side="right"
            )
            
            # Ensure a pad token exists (reuse EOS when the model has none).
            # NOTE(review): with pad==eos, DataCollatorForLanguageModeling
            # masks EOS positions out of the loss as padding — confirm this is
            # acceptable for this model.
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token
            
            # Prepare the quantized model for k-bit training (casts norms,
            # enables gradient checkpointing/input grads as needed).
            model = prepare_model_for_kbit_training(model)
            
            # LoRA hyperparameters come entirely from config.
            lora_config = LoraConfig(
                r=self.learning_config['qlora_config']['r'],
                lora_alpha=self.learning_config['qlora_config']['lora_alpha'],
                target_modules=self.learning_config['qlora_config']['target_modules'],
                lora_dropout=self.learning_config['qlora_config']['lora_dropout'],
                bias=self.learning_config['qlora_config']['bias'],
                task_type="CAUSAL_LM"
            )
            
            # Wrap the base model with the trainable LoRA adapter.
            model = get_peft_model(model, lora_config)
            
            # Log how many parameters are actually trainable.
            model.print_trainable_parameters()
            
            # Trainer hyperparameters; most values come from config, the rest
            # are fixed QLoRA-friendly defaults (paged 32-bit AdamW, cosine
            # schedule, fp16).
            training_args = TrainingArguments(
                output_dir=adapter_output_path,
                per_device_train_batch_size=self.learning_config['training_args']['per_device_train_batch_size'],
                gradient_accumulation_steps=self.learning_config['training_args']['gradient_accumulation_steps'],
                learning_rate=self.learning_config['training_args']['learning_rate'],
                num_train_epochs=self.learning_config['training_args']['num_train_epochs'],
                save_strategy=self.learning_config['training_args']['save_strategy'],
                save_total_limit=self.learning_config['training_args']['save_total_limit'],
                fp16=True,
                optim="paged_adamw_32bit",
                lr_scheduler_type="cosine",
                warmup_ratio=0.03,
                logging_steps=10,
                report_to=[],  # disable wandb and other experiment trackers
            )
            
            # Causal-LM collator (mlm=False): labels are the input ids.
            data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
            
            # Tokenize each 'text' field, truncating/padding to 2048 tokens.
            def preprocess_function(examples):
                return tokenizer(examples['text'], truncation=True, max_length=2048, padding="max_length")
            
            # Tokenize the whole dataset in parallel, dropping the raw text.
            tokenized_dataset = dataset['train'].map(
                preprocess_function,
                batched=True,
                remove_columns=['text'],
                num_proc=4
            )
            
            # Import here so the heavy Trainer machinery is only pulled in
            # when a fine-tune actually runs.
            from transformers import Trainer
            
            trainer = Trainer(
                model=model,
                args=training_args,
                train_dataset=tokenized_dataset,
                data_collator=data_collator
            )
            
            # Run the training loop.
            trainer.train()
            
            # Persist the LoRA adapter weights and the tokenizer together.
            trainer.save_model(adapter_output_path)
            tokenizer.save_pretrained(adapter_output_path)
            
            logger.info(f"QLoRA微调完成，适配器已保存到: {adapter_output_path}")
            
            return adapter_name
        except Exception as e:
            logger.error(f"QLoRA微调失败: {str(e)}")
            return None
    
    def hot_reload_lora_adapter(self, adapter_name):
        """Hot-swap a LoRA adapter into the running model.

        Args:
            adapter_name: directory name under ``self.output_dir`` holding a
                previously fine-tuned adapter.

        Returns:
            True when ``model_manager`` reports the swap succeeded;
            False when the adapter is missing or the swap fails.
        """
        try:
            # Verify the adapter directory exists before asking for a reload.
            adapter_path = os.path.join(self.output_dir, adapter_name)
            if not os.path.exists(adapter_path):
                logger.error(f"LoRA适配器不存在: {adapter_path}")
                return False
            
            # Delegate the live swap to the model manager.
            success = model_manager.update_lora_adapter(adapter_name)
            
            if success:
                logger.info(f"LoRA适配器热加载成功: {adapter_name}")
            
            return success
        except Exception as e:
            logger.error(f"LoRA适配器热加载失败: {str(e)}")
            return False
    
    def learn_from_user_feedback(self, user_query, assistant_response, corrected_response=None, feedback=None, is_positive=None):
        """Record a user-feedback event as a learning sample.

        Thin wrapper over ``save_learning_data`` that stamps the sample with
        a ``source``/``timestamp`` metadata dict.

        Returns:
            True on success, False otherwise (mirrors ``save_learning_data``).
        """
        try:
            # Tag the sample with its origin and an ISO-8601 timestamp.
            metadata = {
                'source': 'user_feedback',
                'timestamp': pd.Timestamp.now().isoformat()
            }
            
            success = self.save_learning_data(
                user_query=user_query,
                assistant_response=assistant_response,
                corrected_response=corrected_response,
                feedback=feedback,
                is_positive=is_positive,
                metadata=metadata
            )
            
            if success:
                logger.info("从用户反馈中学习成功")
            
            return success
        except Exception as e:
            logger.error(f"从用户反馈中学习失败: {str(e)}")
            return False
    
    def get_learning_stats(self):
        """Collect summary statistics for the learning subsystem.

        Queries the ``learning_data`` table for totals and feedback counts,
        lists adapter directories on disk, and reads the currently active
        adapter from the ``model.lora`` config section.

        Returns:
            A dict with keys ``total_learning_data``, ``used_learning_data``,
            ``unused_learning_data``, ``positive_feedback``,
            ``negative_feedback``, ``available_adapters`` and
            ``current_adapter``; all counts are zeroed (and lists emptied)
            on any error.
        """
        try:
            # Rows are indexed by column name below, so this requires a
            # dict-style cursor (e.g. RealDictCursor) from db_manager.
            with db_manager.get_cursor() as cursor:
                # Total number of learning samples.
                cursor.execute("SELECT COUNT(*) as total FROM learning_data")
                total = cursor.fetchone()['total']
                
                # Samples already consumed by training (unused = total - used).
                cursor.execute("SELECT COUNT(*) as used FROM learning_data WHERE used_in_training = true")
                used = cursor.fetchone()['used']
                
                # Positive / negative feedback counts.
                cursor.execute("SELECT COUNT(*) as positive FROM learning_data WHERE is_positive = true")
                positive = cursor.fetchone()['positive']
                
                cursor.execute("SELECT COUNT(*) as negative FROM learning_data WHERE is_positive = false")
                negative = cursor.fetchone()['negative']
                
                # Each subdirectory of output_dir is one available adapter.
                available_adapters = []
                if os.path.exists(self.output_dir):
                    available_adapters = [d for d in os.listdir(self.output_dir) if os.path.isdir(os.path.join(self.output_dir, d))]
                
                # The active adapter comes from config; only reported when
                # LoRA is enabled there.
                lora_cfg = (self.model_config.get('lora') if isinstance(self.model_config, dict) else {}) or {}
                lora_enabled = bool(lora_cfg.get('enabled'))
                current_adapter = lora_cfg.get('adapter_name') if lora_enabled else None
                return {
                    'total_learning_data': total or 0,
                    'used_learning_data': used or 0,
                    'unused_learning_data': (total or 0) - (used or 0),
                    'positive_feedback': positive or 0,
                    'negative_feedback': negative or 0,
                    'available_adapters': available_adapters,
                    'current_adapter': current_adapter
                }
        except Exception as e:
            logger.error(f"获取学习统计信息失败: {str(e)}")
            # Fail soft: return an all-zero stats payload with the same shape.
            return {
                'total_learning_data': 0,
                'used_learning_data': 0,
                'unused_learning_data': 0,
                'positive_feedback': 0,
                'negative_feedback': 0,
                'available_adapters': [],
                'current_adapter': None
            }
    
    def run_full_learning_cycle(self):
        """Run the end-to-end learning cycle.

        Pipeline: generate training dataset -> QLoRA fine-tune -> hot-reload
        the resulting adapter. Stops (and returns False) at the first failed
        stage.

        Returns:
            True when all three stages succeed, False otherwise.
        """
        try:
            logger.info("开始完整的学习周期")
            
            # Stage 1: export unused learning data into a JSONL dataset.
            dataset_path = self.generate_training_dataset()
            if not dataset_path:
                logger.error("训练数据集生成失败，无法继续学习周期")
                return False
            
            # Stage 2: fine-tune a LoRA adapter on that dataset.
            adapter_name = self.fine_tune_with_qlora(dataset_path)
            if not adapter_name:
                logger.error("QLoRA微调失败，无法继续学习周期")
                return False
            
            # Stage 3: swap the new adapter into the live model.
            success = self.hot_reload_lora_adapter(adapter_name)
            if not success:
                logger.error("LoRA适配器热加载失败")
                return False
            
            logger.info("完整的学习周期执行成功")
            return True
        except Exception as e:
            logger.error(f"完整的学习周期执行失败: {str(e)}")
            return False

# Module-level singleton handle to the learning manager (constructed at
# import time; LearningManager.__new__ guarantees a single shared instance).
learning_manager = LearningManager()