"""
Training模块 - 训练编排器
基于interface_design.md设计，负责训练流程编排
支持11分类任务的RandomForest训练
"""

import numpy as np
import pandas as pd
import json
import time
import yaml
from typing import Dict, Any, Tuple, List
from pathlib import Path
import logging
import os
from datetime import datetime

import models
from models import create_model


class TrainingOperation:
    """Training orchestrator - responsible only for workflow orchestration.

    Creates models through the ``models`` factory, drives training for the
    11-class classification task (currently RandomForest only), and persists
    trained artifacts, metrics, and reports to the configured paths.
    """
    
    def __init__(self, config: Dict[str, Any], paths: Dict[str, Any]):
        """Initialise the orchestrator.

        Args:
            config: Parsed model/training configuration (see
                ``load_config_operator`` for the expected shape).
            paths: Nested path configuration, e.g.
                ``{'models': {'base_path': ...}, 'results': {...}, 'logs': {...}}``.
        """
        self.config = config
        self.paths = paths
        self.logger = self._setup_logger()
        # Result accumulator; not written to by the methods visible here.
        self.results = {}
        
        # Create the required output directory structure up front
        self._create_directories()
    
    def _setup_logger(self) -> logging.Logger:
        """Configure and return this orchestrator's logger.

        Logs INFO-level messages both to ``<logs base_path>/training.log``
        and to the console. Handlers are attached only once, so constructing
        multiple orchestrators does not duplicate log output.
        """
        logger = logging.getLogger('TrainingOperation')
        logger.setLevel(logging.INFO)
        
        # Avoid attaching duplicate handlers on repeated construction
        if not logger.handlers:
            # File handler (directory is created if missing)
            log_dir = self.paths.get('logs', {}).get('base_path', 'logs')
            os.makedirs(log_dir, exist_ok=True)
            
            file_handler = logging.FileHandler(
                os.path.join(log_dir, 'training.log'),
                encoding='utf-8'
            )
            file_handler.setLevel(logging.INFO)
            
            # Console handler
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.INFO)
            
            # Shared formatter for both handlers
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            )
            file_handler.setFormatter(formatter)
            console_handler.setFormatter(formatter)
            
            # Attach handlers
            logger.addHandler(file_handler)
            logger.addHandler(console_handler)
        
        return logger
    
    def _create_directories(self) -> None:
        """Create the models/results/logs directories if they do not exist."""
        dirs_to_create = [
            self.paths.get('models', {}).get('base_path', 'models'),
            self.paths.get('results', {}).get('base_path', 'results'),
            self.paths.get('logs', {}).get('base_path', 'logs')
        ]
        
        for dir_path in dirs_to_create:
            os.makedirs(dir_path, exist_ok=True)
    
    # Single-model training
    def train_single_model_operator(self, model_type: str, data_path: str, 
                                   force_retrain: bool = False, **kwargs) -> Dict[str, Any]:
        """Train a single model - orchestration only.

        Args:
            model_type: Model identifier; only ``'random_forest'`` is supported.
            data_path: Path to the training data file.
            force_retrain: Currently unused; training always delegates to the
                smart trainer, which decides about incremental training itself.
            **kwargs: Forwarded to the model-specific training routine.

        Returns:
            The result dict produced by the underlying trainer.

        Raises:
            ValueError: If ``model_type`` is not supported.
        """
        self.logger.info(f"🚀 开始训练单个模型: {model_type}")
        
        if model_type == 'random_forest':
            return self._smart_train_random_forest_operator(data_path, **kwargs)
        else:
            raise ValueError(f"暂不支持的模型类型: {model_type}")
    
    def train_with_auto_enhancement_operator(self, model_type: str, data_path: str, **kwargs) -> Dict[str, Any]:
        """Smart training mode - automatically decides on incremental training.

        NOTE(review): currently behaves identically to
        ``train_single_model_operator`` (both dispatch to the smart trainer);
        consider consolidating the two entry points.

        Raises:
            ValueError: If ``model_type`` is not supported.
        """
        self.logger.info(f"🧠 开始智能训练模式: {model_type}")
        
        if model_type == 'random_forest':
            return self._smart_train_random_forest_operator(data_path, **kwargs)
        else:
            raise ValueError(f"暂不支持的模型类型: {model_type}")
    
    def _smart_train_random_forest_operator(self, data_path: str, **kwargs) -> Dict[str, Any]:
        """Smart RandomForest training - auto-detects incremental training.

        If a model file already exists at the configured save path, that path
        is handed to the model, which decides whether to load and continue
        from it. On success the model, vectorizer, label mapping, and result
        JSON are all persisted next to each other.

        Args:
            data_path: Path to the training data file.
            **kwargs: Accepted for interface symmetry; not forwarded here.

        Returns:
            The result dict from ``train_operator`` (must contain
            ``'test_metrics'``), augmented with ``'training_time'`` in seconds.

        Raises:
            Exception: Whatever the underlying training raises is logged and
                re-raised unchanged.
        """
        model_save_path = self.paths.get('models', {}).get('random_forest', 'models/random_forest_model.pkl')
        
        # Record wall-clock start time
        start_time = time.time()
        
        # Check whether a previously trained model already exists
        if os.path.exists(model_save_path):
            self.logger.info(f"🔄 发现已有模型，进行增量训练: {model_save_path}")
        else:
            self.logger.info("🆕 未发现已有模型，进行全新训练")
        
        self._log_training_start('RandomForest', self.config.get('models', {}).get('random_forest', {}))
        
        try:
            # 1. Create the model via the factory
            rf_model = create_model('random_forest', self.config)
            
            # 2. Run training (pass the existing model path; the model itself
            #    decides whether to load it for incremental training)
            results = rf_model.train_operator(data_path, 'ml', existing_model_path=model_save_path)
            
            # 3. Compute total training time
            training_time = time.time() - start_time
            results['training_time'] = training_time
            
            # 4. Persist the trained model
            rf_model.save(model_save_path)
            
            # 5. Persist the fitted vectorizer alongside the model
            vectorizer_path = model_save_path.replace('_model.pkl', '_vectorizer.pkl')
            rf_model.save_vectorizer(rf_model.vectorizer, vectorizer_path)
            
            # 6. Persist the label mapping as JSON
            label_mapping_path = model_save_path.replace('_model.pkl', '_label_mapping.json')
            os.makedirs(os.path.dirname(label_mapping_path), exist_ok=True)
            with open(label_mapping_path, 'w', encoding='utf-8') as f:
                json.dump(rf_model.label_mapping, f, ensure_ascii=False, indent=2)
            
            # 7. Persist the training results (separate filename for incremental runs)
            result_filename = 'random_forest_incremental_results.json' if results.get('training_type') == 'incremental' else 'random_forest_results.json'
            self._save_results(result_filename, results)
            
            # Log completion and final test-set metrics
            self._log_training_end('RandomForest', training_time, results['test_metrics'])
            
            return results
            
        except Exception as e:
            self.logger.error(f"❌ RandomForest训练失败: {str(e)}")
            raise
    
    # Batch training
    def train_ml_models_operator(self, data_path: str) -> Dict[str, Any]:
        """Train all ML models.

        Currently trains only RandomForest; a failed model is recorded as an
        ``{'error': ...}`` entry instead of aborting the batch.

        Returns:
            Mapping of model name to its training result (or error record).
        """
        self.logger.info("🚀 开始训练所有ML模型")
        
        ml_results = {}
        
        # Train RandomForest; failures are captured, not propagated
        try:
            rf_results = self.train_single_model_operator('random_forest', data_path)
            ml_results['random_forest'] = rf_results
            self.logger.info("✅ RandomForest训练完成")
        except Exception as e:
            self.logger.error(f"❌ RandomForest训练失败: {str(e)}")
            ml_results['random_forest'] = {'error': str(e)}
        
        # SVM training could be added here:
        # svm_results = self.train_single_model_operator('svm', data_path)
        # ml_results['svm'] = svm_results
        
        # Persist the combined ML results
        self._save_results('ml_models_results.json', ml_results)
        
        return ml_results
    
    def train_dl_models_operator(self, data_path: str) -> Dict[str, Any]:
        """Train all DL models.

        Placeholder: deep-learning training is not implemented yet, so this
        always returns an empty dict.
        """
        self.logger.info("🚀 开始训练所有DL模型")
        
        # Return an empty result for now; to be implemented later
        dl_results = {}
        
        self.logger.info("ℹ️ DL模型训练功能待实现")
        return dl_results
    
    def train_all_models_operator(self, data_path: str) -> Dict[str, Any]:
        """Train all models (ML then DL) and persist the combined results.

        Returns:
            ``{'ml_models': ..., 'dl_models': ...}``.
        """
        self.logger.info("🚀 开始训练所有模型")
        
        all_results = {}
        
        # Train the ML models
        ml_results = self.train_ml_models_operator(data_path)
        all_results['ml_models'] = ml_results
        
        # Train the DL models (currently a no-op placeholder)
        dl_results = self.train_dl_models_operator(data_path)
        all_results['dl_models'] = dl_results
        
        # Persist the combined results
        self._save_results('all_models_results.json', all_results)
        
        return all_results
    
    # Configuration management
    @staticmethod
    def load_config_operator(config_path: str = 'config/models.yaml') -> Dict[str, Any]:
        """Load the YAML configuration file.

        Falls back to a built-in default configuration when the file does not
        exist. NOTE(review): an empty YAML file makes ``safe_load`` return
        ``None``, which is passed through as-is — confirm callers tolerate it.
        """
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                config = yaml.safe_load(f)
            return config
        except FileNotFoundError:
            # Built-in default configuration
            return {
                'random_state': None,  # global random-seed setting
                'models': {
                    'random_forest': {
                        'n_estimators': 100,
                        'n_jobs': -1
                    }
                },
                'feature_extraction': {
                    'tfidf': {
                        'analyzer': 'char',
                        'ngram_range': [1, 3],
                        'max_features': 10000,
                        'min_df': 2,
                        'max_df': 0.95
                    }
                },
                'data': {
                    'split_ratios': [0.8, 0.1, 0.1],
                    'random_state': 42,
                    'stratify': True
                }
            }
    
    @staticmethod
    def validate_config_operator(config: Dict[str, Any]) -> Tuple[bool, List[str]]:
        """Validate the configuration dict.

        Returns:
            ``(is_valid, issues)`` where ``issues`` contains errors followed
            by warnings. Only errors make ``is_valid`` False; warnings are
            informational.
        """
        errors = []
        warnings = []
        
        # Required top-level sections
        required_sections = ['models', 'feature_extraction', 'data']
        for section in required_sections:
            if section not in config:
                errors.append(f"缺少配置节: {section}")
        
        # RandomForest configuration
        if 'models' in config:
            if 'random_forest' not in config['models']:
                errors.append("缺少RandomForest模型配置")
            else:
                rf_config = config['models']['random_forest']
                # Key RandomForest parameters (missing ones only warn)
                rf_required = ['n_estimators', 'random_state']
                for param in rf_required:
                    if param not in rf_config:
                        warnings.append(f"RandomForest缺少{param}参数，将使用默认值")
        
        # TF-IDF configuration
        if 'feature_extraction' in config:
            if 'tfidf' not in config['feature_extraction']:
                errors.append("缺少TF-IDF特征提取配置")
            else:
                tfidf_config = config['feature_extraction']['tfidf']
                # Key TF-IDF parameters (missing ones only warn)
                tfidf_required = ['analyzer', 'ngram_range', 'max_features']
                for param in tfidf_required:
                    if param not in tfidf_config:
                        warnings.append(f"TF-IDF缺少{param}参数，将使用默认值")
        
        # Label mapping: 11 classes expected for this task
        if 'label_mapping' not in config:
            warnings.append("缺少label_mapping配置，将使用默认映射")
        elif len(config['label_mapping']) != 11:
            warnings.append(f"标签映射数量异常: {len(config['label_mapping'])}，期望11个")
        
        # Data split configuration: ratios must sum to 1.0 (±0.01)
        if 'data' in config:
            if 'split_ratios' not in config['data']:
                warnings.append("缺少数据划分比例配置，将使用默认值")
            elif abs(sum(config['data']['split_ratios']) - 1.0) > 0.01:
                errors.append(f"数据划分比例总和不等于1.0: {config['data']['split_ratios']}")
        
        # Merge errors and warnings into one issue list
        all_issues = errors + warnings
        is_valid = len(errors) == 0  # only errors affect validity, warnings do not
        
        return is_valid, all_issues
    
    # Logging helpers
    def _log_training_start(self, model_name: str, config: Dict[str, Any]) -> None:
        """Log the start of a training run with its configuration."""
        self.logger.info(f"🚀 开始训练{model_name}模型")
        self.logger.info(f"模型配置: {config}")
        self.logger.info(f"开始时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    
    def _log_training_end(self, model_name: str, training_time: float, final_metrics: Dict[str, float]) -> None:
        """Log the end of a training run.

        Args:
            model_name: Display name of the trained model.
            training_time: Elapsed training time in seconds.
            final_metrics: Must contain 'accuracy', 'precision_macro',
                'recall_macro' and 'f1_macro' (KeyError otherwise).
        """
        self.logger.info(f"✅ {model_name}训练完成")
        self.logger.info(f"训练用时: {training_time:.2f}秒")
        self.logger.info(f"最终测试集性能:")
        self.logger.info(f"  准确率: {final_metrics['accuracy']:.4f}")
        self.logger.info(f"  精确率(macro): {final_metrics['precision_macro']:.4f}")
        self.logger.info(f"  召回率(macro): {final_metrics['recall_macro']:.4f}")
        self.logger.info(f"  F1分数(macro): {final_metrics['f1_macro']:.4f}")
        self.logger.info(f"结束时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    
    # Result-management helpers
    def _save_results(self, filename: str, results: Dict[str, Any]) -> None:
        """Write a result dict as JSON into the results directory.

        ``default=str`` stringifies any non-JSON-serializable values (e.g.
        numpy scalars, datetimes) rather than raising.
        """
        results_dir = self.paths.get('results', {}).get('base_path', 'results')
        filepath = os.path.join(results_dir, filename)
        
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(results, f, ensure_ascii=False, indent=2, default=str)
        
        self.logger.info(f"结果已保存到: {filepath}")
    
    def generate_comparison_report(self, all_results: Dict[str, Any]) -> str:
        """Build, save, and return a plain-text model-comparison report.

        Only entries under ``all_results['ml_models']`` that carry a
        ``'test_metrics'`` key are included; error entries are skipped.

        Returns:
            The full report text (also written to
            ``<results>/model_comparison_report.txt``).
        """
        report_lines = []
        report_lines.append("=" * 80)
        report_lines.append("模型性能对比报告")
        report_lines.append("=" * 80)
        
        # Collect every model result that has test metrics
        models_data = []
        
        if 'ml_models' in all_results:
            for model_name, results in all_results['ml_models'].items():
                if 'test_metrics' in results:
                    models_data.append({
                        'name': model_name,
                        'type': 'ML',
                        'metrics': results['test_metrics'],
                        'time': results.get('training_time', 0)
                    })
        
        if not models_data:
            report_lines.append("没有可用的模型结果")
            return "\n".join(report_lines)
        
        # Table header
        report_lines.append(f"{'模型':<15} {'类型':<6} {'准确率':<8} {'F1(macro)':<10} {'训练时间(s)':<12}")
        report_lines.append("-" * 80)
        
        # Per-model rows; track the best model by macro F1
        # NOTE(review): with best_f1 starting at 0, a run where every model
        # scores exactly 0 leaves best_model as None — confirm acceptable.
        best_f1 = 0
        best_model = None
        
        for data in models_data:
            name = data['name']
            model_type = data['type']
            metrics = data['metrics']
            training_time = data['time']
            
            accuracy = metrics['accuracy']
            f1_macro = metrics['f1_macro']
            
            if f1_macro > best_f1:
                best_f1 = f1_macro
                best_model = name
            
            report_lines.append(
                f"{name:<15} {model_type:<6} {accuracy:<8.4f} {f1_macro:<10.4f} {training_time:<12.2f}"
            )
        
        report_lines.append("-" * 80)
        report_lines.append(f"最佳模型: {best_model} (F1分数: {best_f1:.4f})")
        report_lines.append("=" * 80)
        
        report = "\n".join(report_lines)
        
        # Persist the report to the results directory
        report_path = os.path.join(
            self.paths.get('results', {}).get('base_path', 'results'),
            'model_comparison_report.txt'
        )
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(report)
        
        self.logger.info(f"对比报告已保存到: {report_path}")
        
        return report


def create_training_operation(config_path: str = 'config/models.yaml', 
                            paths_path: str = 'config/paths.yaml') -> TrainingOperation:
    """Factory function: build a TrainingOperation from YAML config files.

    Args:
        config_path: Path to the model-configuration YAML.
        paths_path: Path to the filesystem-paths YAML.

    Returns:
        A fully initialised ``TrainingOperation``.
    """
    # Load model/training configuration (falls back to built-in defaults)
    config = TrainingOperation.load_config_operator(config_path)
    
    # Default path layout, used when the paths file is missing or empty
    default_paths = {
        'models': {'base_path': 'models', 'random_forest': 'models/random_forest_model.pkl'},
        'results': {'base_path': 'results'},
        'logs': {'base_path': 'logs'}
    }
    
    # Load path configuration
    try:
        with open(paths_path, 'r', encoding='utf-8') as f:
            # yaml.safe_load returns None for an empty file; fall back to the
            # defaults so downstream paths.get(...) calls don't crash with
            # AttributeError on None.
            paths = yaml.safe_load(f) or default_paths
    except FileNotFoundError:
        paths = default_paths
    
    return TrainingOperation(config, paths)


if __name__ == "__main__":
    # 测试训练编排器 - 使用YAML配置文件
    try:
        trainer = create_training_operation()
        print("✅ 训练编排器创建成功（使用YAML配置）")
        
        # 显示加载的配置信息
        print(f"加载的配置节: {list(trainer.config.keys())}")
        print(f"全局random_state: {trainer.config.get('random_state', 'N/A')}")
        print(f"训练配置: {trainer.config.get('training', {})}")
        
        # 验证配置
        is_valid, errors = trainer.validate_config_operator(trainer.config)
        if is_valid:
            print("✅ 配置验证通过")
            if errors:
                print(f"⚠️ 警告: {errors}")
        else:
            print(f"❌ 配置验证失败: {errors}")
            
    except FileNotFoundError as e:
        print(f"❌ {e}")
        print("请确保在项目根目录运行，且config/目录下有配置文件")
