"""
增量预训练模块
用于在金融领域语料上对大语言模型进行领域适应预训练
"""
import os
import torch
from typing import Dict, List, Any, Optional
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, DataCollatorForLanguageModeling
from transformers import Trainer, BitsAndBytesConfig
import datasets
from fin_senti_entity_platform.utils.logger import Logger
from fin_senti_entity_platform.utils.config_loader import ConfigLoader
import json


class IncrementalPretrainer:
    """Domain-adaptive (incremental) pretraining for causal language models.

    Wraps the Hugging Face ``Trainer`` to continue pretraining a base LLM on
    financial-domain corpora, with optional 4-bit quantization, checkpoint
    resumption, and LoRA-adapter merging.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize the pretrainer.

        Args:
            config: Pretraining configuration. When ``None``, the
                ``incremental_pretrain`` section of the project config
                file is used.
        """
        if config is None:
            config = ConfigLoader().get('incremental_pretrain', {})

        self.config = config
        self.logger = Logger().get_logger(__name__)

        # Hyperparameters, all overridable via config.
        self.model_name = config.get('model_name', 'deepseek-ai/deepseek-llm-7b-base')
        self.output_dir = config.get('output_dir', './model_saves/incremental_pretrained')
        self.max_seq_length = config.get('max_seq_length', 1024)
        self.batch_size = config.get('batch_size', 8)
        self.learning_rate = config.get('learning_rate', 5e-6)
        self.num_epochs = config.get('num_epochs', 1)
        self.warmup_ratio = config.get('warmup_ratio', 0.1)
        self.weight_decay = config.get('weight_decay', 0.01)
        self.logging_steps = config.get('logging_steps', 100)
        self.save_steps = config.get('save_steps', 1000)
        self.gradient_accumulation_steps = config.get('gradient_accumulation_steps', 4)
        self.use_quantization = config.get('use_quantization', True)

        # Populated lazily by load_model() / resume_training().
        self.tokenizer = None
        self.model = None

        os.makedirs(self.output_dir, exist_ok=True)

    def _build_training_args(self, has_eval: bool) -> TrainingArguments:
        """Build the ``TrainingArguments`` shared by train() and resume_training().

        Args:
            has_eval: Whether an evaluation dataset will be supplied; enables
                step-based evaluation and best-model tracking.
        """
        return TrainingArguments(
            output_dir=self.output_dir,
            num_train_epochs=self.num_epochs,
            per_device_train_batch_size=self.batch_size,
            per_device_eval_batch_size=self.batch_size,
            gradient_accumulation_steps=self.gradient_accumulation_steps,
            learning_rate=self.learning_rate,
            weight_decay=self.weight_decay,
            warmup_ratio=self.warmup_ratio,
            logging_steps=self.logging_steps,
            save_steps=self.save_steps,
            evaluation_strategy="steps" if has_eval else "no",
            # Evaluate at the same cadence as checkpointing so that
            # load_best_model_at_end can pair checkpoints with eval scores.
            eval_steps=self.save_steps if has_eval else None,
            fp16=torch.cuda.is_available(),
            optim="adamw_torch",
            report_to="tensorboard",
            load_best_model_at_end=has_eval,
            metric_for_best_model="loss" if has_eval else None,
            greater_is_better=False if has_eval else None,
        )

    def _build_data_collator(self) -> DataCollatorForLanguageModeling:
        """Return a collator for causal-LM training.

        ``mlm=False`` selects next-token prediction (labels are the inputs),
        not a masked-language-model objective.
        """
        return DataCollatorForLanguageModeling(tokenizer=self.tokenizer, mlm=False)

    def _ensure_pad_token(self):
        """Fall back to EOS as the pad token when the tokenizer defines none.

        Causal-LM tokenizers frequently ship without a pad token; the data
        collator needs one to pad batches.
        """
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token_id = self.tokenizer.eos_token_id

    def load_model(self):
        """Load the base model and tokenizer named by ``self.model_name``.

        Applies 4-bit NF4 quantization when ``use_quantization`` is enabled
        and CUDA is available.

        Raises:
            Exception: Re-raised after logging when loading fails.
        """
        try:
            self.logger.info(f"正在加载基础模型: {self.model_name}")

            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self._ensure_pad_token()

            # 4-bit NF4 double quantization, fp16 compute (GPU only).
            quantization_config = None
            if self.use_quantization and torch.cuda.is_available():
                quantization_config = BitsAndBytesConfig(
                    load_in_4bit=True,
                    bnb_4bit_use_double_quant=True,
                    bnb_4bit_quant_type="nf4",
                    bnb_4bit_compute_dtype=torch.float16,
                )

            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None,
                quantization_config=quantization_config,
                use_cache=False,  # KV cache is useless (and warned about) during training
            )

            self.logger.info("成功加载基础模型和分词器")

        except Exception as e:
            self.logger.error(f"加载模型失败: {str(e)}")
            raise

    def prepare_dataset(self, data_files: List[str]) -> datasets.Dataset:
        """Load and tokenize JSON-lines corpora for pretraining.

        Each input file must contain records with a ``text`` field. Long
        documents are chunked into ``max_seq_length``-token pieces via
        overflowing tokens.

        Args:
            data_files: Paths of the JSON data files.

        Returns:
            datasets.Dataset: Tokenized dataset with an ``input_ids`` column.

        Raises:
            Exception: Re-raised after logging when loading/tokenizing fails.
        """
        try:
            self.logger.info(f"正在准备数据集，文件数量: {len(data_files)}")

            dataset = datasets.load_dataset(
                'json',
                data_files=data_files,
                split='train'
            )

            self.logger.info(f"加载完成，数据集大小: {len(dataset)}")

            def preprocess_function(examples):
                # Drop non-string and blank entries before tokenizing.
                texts = [t for t in examples['text'] if isinstance(t, str) and t.strip()]

                # A batch may become empty after filtering; calling the
                # tokenizer with an empty list raises, so bail out early.
                if not texts:
                    return {'input_ids': []}

                # return_overflowing_tokens=True splits documents longer than
                # max_seq_length into multiple training samples.
                tokenized = self.tokenizer(
                    texts,
                    max_length=self.max_seq_length,
                    truncation=True,
                    padding=False,
                    return_overflowing_tokens=True,
                    return_length=True
                )

                input_ids = [
                    ids
                    for length, ids in zip(tokenized['length'], tokenized['input_ids'])
                    if length > 0
                ]
                return {'input_ids': input_ids}

            tokenized_dataset = dataset.map(
                preprocess_function,
                batched=True,
                remove_columns=dataset.column_names,
                num_proc=4  # parallelize tokenization
            )

            self.logger.info(f"预处理完成，tokenized数据集大小: {len(tokenized_dataset)}")

            # Defensive: drop any zero-length samples that slipped through.
            tokenized_dataset = tokenized_dataset.filter(
                lambda x: len(x['input_ids']) > 0
            )

            self.logger.info(f"过滤后，数据集大小: {len(tokenized_dataset)}")

            return tokenized_dataset

        except Exception as e:
            self.logger.error(f"准备数据集失败: {str(e)}")
            raise

    def train(self, train_data_files: List[str], val_data_files: Optional[List[str]] = None):
        """Run incremental pretraining from the configured base model.

        Args:
            train_data_files: Training data files (JSON lines with ``text``).
            val_data_files: Optional validation data files.

        Returns:
            dict: Training metrics reported by the ``Trainer``.

        Raises:
            Exception: Re-raised after logging when any stage fails.
        """
        try:
            self.load_model()

            train_dataset = self.prepare_dataset(train_data_files)

            eval_dataset = None
            if val_data_files:
                eval_dataset = self.prepare_dataset(val_data_files)

            trainer = Trainer(
                model=self.model,
                args=self._build_training_args(eval_dataset is not None),
                train_dataset=train_dataset,
                eval_dataset=eval_dataset,
                tokenizer=self.tokenizer,
                data_collator=self._build_data_collator(),
            )

            self.logger.info(f"开始增量预训练，输出目录: {self.output_dir}")
            train_result = trainer.train()

            self.logger.info("保存预训练模型")
            trainer.save_model()

            metrics = train_result.metrics
            self.logger.info(f"训练完成，指标: {metrics}")

            # Persist metrics next to the model for later inspection.
            with open(os.path.join(self.output_dir, "train_metrics.json"), "w") as f:
                json.dump(metrics, f, ensure_ascii=False, indent=2)

            if eval_dataset:
                self.logger.info("开始评估模型")
                eval_metrics = trainer.evaluate()
                self.logger.info(f"评估完成，指标: {eval_metrics}")

                with open(os.path.join(self.output_dir, "eval_metrics.json"), "w") as f:
                    json.dump(eval_metrics, f, ensure_ascii=False, indent=2)

            return metrics

        except Exception as e:
            self.logger.error(f"增量预训练失败: {str(e)}")
            raise

    def resume_training(self, checkpoint_dir: str, train_data_files: List[str], val_data_files: Optional[List[str]] = None):
        """Resume pretraining from a saved checkpoint.

        Args:
            checkpoint_dir: Directory of a previously saved checkpoint.
            train_data_files: Training data files.
            val_data_files: Optional validation data files.

        Returns:
            dict: Training metrics reported by the ``Trainer``.

        Raises:
            FileNotFoundError: If ``checkpoint_dir`` does not exist.
            Exception: Re-raised after logging when any stage fails.
        """
        try:
            if not os.path.exists(checkpoint_dir):
                self.logger.error(f"检查点目录不存在: {checkpoint_dir}")
                raise FileNotFoundError(f"检查点目录不存在: {checkpoint_dir}")

            self.logger.info(f"从检查点恢复训练: {checkpoint_dir}")

            train_dataset = self.prepare_dataset(train_data_files)
            eval_dataset = self.prepare_dataset(val_data_files) if val_data_files else None

            # Model and tokenizer come from the checkpoint, not the base model.
            self.tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
            self._ensure_pad_token()
            self.model = AutoModelForCausalLM.from_pretrained(
                checkpoint_dir,
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None,
                use_cache=False
            )

            trainer = Trainer(
                model=self.model,
                args=self._build_training_args(eval_dataset is not None),
                train_dataset=train_dataset,
                eval_dataset=eval_dataset,
                tokenizer=self.tokenizer,
                data_collator=self._build_data_collator(),
            )

            # resume_from_checkpoint restores optimizer/scheduler/global step.
            self.logger.info("开始恢复训练")
            train_result = trainer.train(resume_from_checkpoint=checkpoint_dir)

            self.logger.info("保存预训练模型")
            trainer.save_model()

            metrics = train_result.metrics
            self.logger.info(f"训练完成，指标: {metrics}")

            return metrics

        except Exception as e:
            self.logger.error(f"恢复训练失败: {str(e)}")
            raise

    def create_merged_lora_model(self, lora_checkpoint: str, output_dir: str):
        """Merge a LoRA adapter into the base model and save the result.

        Args:
            lora_checkpoint: Directory containing the LoRA adapter.
            output_dir: Destination directory for the merged model.

        Raises:
            ImportError: If the ``peft`` library is not installed.
            Exception: Re-raised after logging when merging fails.
        """
        try:
            from peft import PeftModel

            self.logger.info(f"正在合并LoRA适配器: {lora_checkpoint}")

            base_model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None,
                use_cache=False
            )

            lora_model = PeftModel.from_pretrained(
                base_model,
                lora_checkpoint,
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
            )

            # Fold LoRA deltas into the base weights and drop the adapter.
            merged_model = lora_model.merge_and_unload()

            os.makedirs(output_dir, exist_ok=True)

            # Fix: self.tokenizer is None unless load_model() ran first; load
            # the base tokenizer so the saved checkpoint is complete.
            if self.tokenizer is None:
                self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)

            merged_model.save_pretrained(output_dir)
            self.tokenizer.save_pretrained(output_dir)

            self.logger.info(f"成功合并并保存模型到: {output_dir}")

        except ImportError:
            self.logger.error("未安装peft库，请先安装: pip install peft")
            raise
        except Exception as e:
            self.logger.error(f"合并LoRA适配器失败: {str(e)}")
            raise