"""预训练脚本（Hugging Face Trainer）"""
"""
增量预训练管理器
提供高级别的预训练管理功能
"""
import os
import json
import shutil
import time
from typing import List, Dict, Any, Optional

from fin_senti_entity_platform.model_development.incremental_pretrain.incremental_pretrainer import IncrementalPretrainer
from fin_senti_entity_platform.model_development.incremental_pretrain.dataset_preparer import DatasetPreparer
from fin_senti_entity_platform.utils.logger import Logger
from fin_senti_entity_platform.utils.config_loader import ConfigLoader


class PretrainTrainer:
    """High-level manager for incremental pretraining experiments.

    Orchestrates the ``DatasetPreparer`` and ``IncrementalPretrainer``
    collaborators and lays out a per-experiment directory under
    ``experiment_dir`` where the config, prepared datasets, metrics and
    model snapshots are persisted.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize the pretraining manager.

        Args:
            config: Configuration parameters; when ``None`` the
                ``pretrain_trainer`` section of the application config
                is loaded instead.
        """
        # Fall back to the application-wide configuration file.
        if config is None:
            config = ConfigLoader().get('pretrain_trainer', {})

        self.config = config
        self.logger = Logger().get_logger(__name__)

        # Collaborators: corpus/dataset preparation and the actual trainer.
        self.preparer = DatasetPreparer(config.get('dataset_preparer', {}))
        self.pretrainer = IncrementalPretrainer(config.get('incremental_pretrain', {}))

        # Experiment layout: <experiment_dir>/<experiment_name>/...
        self.experiment_dir = config.get('experiment_dir', './experiments')
        self.experiment_name = config.get('experiment_name', f'pretrain_{int(time.time())}')
        self.output_dir = os.path.join(self.experiment_dir, self.experiment_name)

        os.makedirs(self.output_dir, exist_ok=True)

        # Persist the effective config so the experiment is reproducible.
        self._dump_json(os.path.join(self.output_dir, 'config.json'), self.config)

    def _dump_json(self, path: str, payload: Any) -> None:
        """Write *payload* to *path* as pretty-printed UTF-8 JSON."""
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(payload, f, ensure_ascii=False, indent=2)

    def _prepare_datasets(self, data_sources: List[Dict[str, Any]]) -> tuple:
        """Build the pretraining corpus and split it into train/val/test.

        Shared first stage of ``run_full_pretrain`` and ``resume_pretrain``.

        Args:
            data_sources: Data source descriptors understood by the preparer.

        Returns:
            tuple: ``(corpus_file, train_file, val_file, test_file)`` paths.
        """
        corpus_file = os.path.join(self.output_dir, 'pretrain_corpus.json')
        self.preparer.create_pretrain_corpus(data_sources, corpus_file)

        split_prefix = os.path.join(self.output_dir, 'dataset')
        train_file, val_file, test_file = self.preparer.split_dataset(
            self.preparer.load_from_json(corpus_file),
            split_prefix
        )
        return corpus_file, train_file, val_file, test_file

    def run_full_pretrain(self, data_sources: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Run the full incremental pretraining pipeline from scratch.

        Args:
            data_sources: Data source descriptors for corpus creation.

        Returns:
            Dict[str, Any]: Pretraining results (file paths and metrics),
                also persisted to ``results.json`` in the experiment dir.

        Raises:
            Exception: Re-raised after logging if any stage fails.
        """
        try:
            self.logger.info(f"开始完整的增量预训练流程，实验名称: {self.experiment_name}")

            # 1-2. Corpus creation + train/val/test split.
            corpus_file, train_file, val_file, test_file = self._prepare_datasets(data_sources)

            # 3. Incremental pretraining on the prepared splits.
            metrics = self.pretrainer.train([train_file], [val_file])

            # 4. Persist the results.
            results = {
                'experiment_name': self.experiment_name,
                'output_dir': self.output_dir,
                'corpus_file': corpus_file,
                'train_file': train_file,
                'val_file': val_file,
                'test_file': test_file,
                'metrics': metrics,
                'timestamp': int(time.time())
            }
            self._dump_json(os.path.join(self.output_dir, 'results.json'), results)

            self.logger.info(f"完整的增量预训练流程完成，实验结果保存在: {self.output_dir}")

            return results

        except Exception as e:
            self.logger.error(f"完整的增量预训练流程失败: {str(e)}")
            raise

    def resume_pretrain(self, checkpoint_dir: str, data_sources: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Resume pretraining from a given checkpoint.

        Args:
            checkpoint_dir: Checkpoint directory to resume from.
            data_sources: Data source descriptors for corpus creation.

        Returns:
            Dict[str, Any]: Pretraining results (file paths and metrics),
                also persisted to ``results.json`` in the experiment dir.

        Raises:
            Exception: Re-raised after logging if any stage fails.
        """
        try:
            self.logger.info(f"从检查点恢复预训练，检查点: {checkpoint_dir}")

            # 1-2. Corpus creation + train/val/test split.
            corpus_file, train_file, val_file, test_file = self._prepare_datasets(data_sources)

            # 3. Resume training from the checkpoint.
            metrics = self.pretrainer.resume_training(checkpoint_dir, [train_file], [val_file])

            # 4. Persist the results.
            results = {
                'experiment_name': self.experiment_name,
                'output_dir': self.output_dir,
                'checkpoint_dir': checkpoint_dir,
                'corpus_file': corpus_file,
                'train_file': train_file,
                'val_file': val_file,
                'test_file': test_file,
                'metrics': metrics,
                'timestamp': int(time.time())
            }
            self._dump_json(os.path.join(self.output_dir, 'results.json'), results)

            self.logger.info(f"恢复预训练完成，实验结果保存在: {self.output_dir}")

            return results

        except Exception as e:
            self.logger.error(f"恢复预训练失败: {str(e)}")
            raise

    def evaluate_pretrained_model(self, model_path: str, test_files: List[str]) -> Dict[str, Any]:
        """Evaluate a pretrained model.

        NOTE(review): real evaluation (e.g. perplexity over *test_files*)
        is not implemented yet -- hard-coded placeholder metrics are
        returned and saved.

        Args:
            model_path: Path of the model to evaluate.
            test_files: Test-set file paths (recorded in the result).

        Returns:
            Dict[str, Any]: Evaluation metrics. The dict also carries the
                test-file list, hence not strictly ``Dict[str, float]``.

        Raises:
            Exception: Re-raised after logging if evaluation fails.
        """
        try:
            self.logger.info(f"评估预训练模型: {model_path}")

            # Placeholder until perplexity computation is implemented.
            metrics = {
                'perplexity': 10.5,
                'eval_loss': 2.35,
                'test_files': test_files
            }

            self._dump_json(os.path.join(self.output_dir, 'evaluation_results.json'), metrics)

            self.logger.info(f"模型评估完成，指标: {metrics}")

            return metrics

        except Exception as e:
            self.logger.error(f"模型评估失败: {str(e)}")
            raise

    def create_model_snapshot(self, model_path: str, snapshot_name: str) -> str:
        """Copy a model's key files into a named snapshot directory.

        The snapshot is created under ``<experiment_dir>/snapshots/<name>``
        and receives the model config, any weight files
        (``.bin``/``.pt``/``.safetensors``), common tokenizer files and a
        ``snapshot_info.json`` provenance record.

        Args:
            model_path: Directory of the source model.
            snapshot_name: Name of the snapshot directory to create.

        Returns:
            str: Path of the created snapshot directory.

        Raises:
            Exception: Re-raised after logging if the copy fails.
        """
        try:
            self.logger.info(f"创建模型快照，模型路径: {model_path}, 快照名称: {snapshot_name}")

            # Create the snapshot directory (parents included).
            snapshot_path = os.path.join(self.experiment_dir, 'snapshots', snapshot_name)
            os.makedirs(snapshot_path, exist_ok=True)

            # Model configuration, when present.
            config_file = os.path.join(model_path, 'config.json')
            if os.path.exists(config_file):
                shutil.copy2(config_file, snapshot_path)

            # Weight files (simplified copy; sharded checkpoints with index
            # files may need more elaborate handling).
            weight_suffixes = ('.bin', '.pt', '.safetensors')
            for file in os.listdir(model_path):
                if file.endswith(weight_suffixes):
                    shutil.copy2(os.path.join(model_path, file), snapshot_path)

            # Common tokenizer artifacts, copied when present.
            for file in ('tokenizer.json', 'tokenizer_config.json', 'vocab.txt', 'merges.txt'):
                source = os.path.join(model_path, file)
                if os.path.exists(source):
                    shutil.copy2(source, snapshot_path)

            # Record provenance alongside the copied files.
            snapshot_info = {
                'snapshot_name': snapshot_name,
                'snapshot_path': snapshot_path,
                'source_model_path': model_path,
                'timestamp': int(time.time())
            }
            self._dump_json(os.path.join(snapshot_path, 'snapshot_info.json'), snapshot_info)

            self.logger.info(f"成功创建模型快照: {snapshot_path}")

            return snapshot_path

        except Exception as e:
            self.logger.error(f"创建模型快照失败: {str(e)}")
            raise