#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
人才流失预测系统 - 主运行脚本
整合数据处理、模型训练和评估的完整工作流
"""

import os
import sys
import logging
import warnings
from datetime import datetime
os.environ["JOBLIB_MULTIPROCESSING"] = "0"  # 禁用joblib多进程
os.environ["LOKY_PICKLER"] = "pickle"       # 使用更兼容的pickler

# 设置环境变量抑制XGBoost警告
os.environ['XGBOOST_VERBOSE'] = '0'

# 抑制XGBoost和其他警告
warnings.filterwarnings('ignore', category=UserWarning, module='xgboost')
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning, module='sklearn')

# 添加当前目录到Python路径
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from data_processor import DataProcessor
from model_trainer import ModelTrainer
from model_evaluator import ModelEvaluator

def setup_logging():
    """Configure root logging to a timestamped file and stdout.

    Creates the ``../log`` directory if needed, then installs a file
    handler (UTF-8, one file per run) and a stdout handler on the root
    logger.  ``force=True`` removes any handlers left over from a
    previous call, so invoking several pipeline modes in one process
    re-points logging at a fresh log file instead of ``basicConfig``
    silently becoming a no-op.

    Returns:
        logging.Logger: the module-level logger.
    """
    # Ensure the log directory exists before the FileHandler opens a file.
    os.makedirs('../log', exist_ok=True)

    log_filename = f'../log/run_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log'

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[
            logging.FileHandler(log_filename, encoding='utf-8'),
            logging.StreamHandler(sys.stdout)
        ],
        force=True,  # re-configure even if logging was already set up
    )

    return logging.getLogger(__name__)

def run_complete_pipeline():
    """Run the full ML workflow: preprocessing, training, then evaluation.

    Returns:
        bool: True if every stage finished, False if any step raised.
    """
    logger = setup_logging()

    logger.info("=" * 60)
    logger.info("开始运行人才流失预测系统")
    logger.info("=" * 60)

    try:
        # --- Stage 1: data loading, feature analysis, preprocessing ---
        logger.info("第一步：数据处理和特征工程")
        logger.info("-" * 40)

        dp = DataProcessor()

        logger.info("加载数据...")
        train_df, test_df = dp.load_data('../data/train.csv', '../data/test2.csv')
        logger.info(f"训练数据形状: {train_df.shape}")
        logger.info(f"测试数据形状: {test_df.shape}")

        logger.info("进行特征分析...")
        X, y = dp.analyze_features(train_df)

        logger.info("数据预处理...")
        X_tr, X_te, y_tr, y_te = dp.preprocess_data(train_df, test_df)
        logger.info(f"处理后训练集形状: {X_tr.shape}")
        logger.info(f"处理后测试集形状: {X_te.shape}")

        logger.info("保存预处理器...")
        dp.save_preprocessor('../models/preprocessor.pkl')

        logger.info("绘制特征重要性图...")
        dp.plot_feature_importance()

        # --- Stage 2: base models plus a stacked meta-model ---
        logger.info("\n第二步：模型训练")
        logger.info("-" * 40)

        mt = ModelTrainer()

        logger.info("创建基模型...")
        mt.create_base_models()

        logger.info("训练基模型...")
        base_results = mt.train_base_models(X_tr, y_tr, X_te, y_te)

        logger.info("生成元特征...")
        meta_train = mt.generate_meta_features(X_tr, y_tr)
        meta_test = mt.generate_test_meta_features(X_tr, y_tr, X_te)

        logger.info("训练元模型...")
        meta_model = mt.train_meta_model(meta_train, y_tr, meta_test, y_te)

        logger.info("保存训练结果...")
        mt.save_training_results()

        logger.info("绘制ROC曲线...")
        mt.plot_roc_curves(X_te, y_te)

        # --- Stage 3: evaluate all persisted models on the test split ---
        logger.info("\n第三步：模型评估")
        logger.info("-" * 40)

        ev = ModelEvaluator()

        logger.info("加载模型...")
        ev.load_models()

        logger.info("评估所有模型...")
        results = ev.evaluate_all_models(X_te, y_te)

        logger.info("生成评估报告...")
        report_df = ev.generate_evaluation_report()

        logger.info("绘制评估图表...")
        ev.plot_confusion_matrices()
        ev.plot_roc_curves(X_te, y_te)

        logger.info("\n" + "=" * 60)
        logger.info("所有流程完成！")
        logger.info("=" * 60)

        # Surface the model with the best ROC-AUC, if the report is non-empty.
        if not report_df.empty:
            best = report_df.loc[report_df['ROC-AUC'].idxmax()]
            logger.info(f"最佳模型: {best['Model']}")
            logger.info(f"最佳ROC-AUC: {best['ROC-AUC']:.4f}")
            logger.info(f"最佳准确率: {best['Accuracy']:.4f}")

        return True

    except Exception as e:
        logger.error(f"运行过程中出现错误: {str(e)}")
        logger.error("详细错误信息:", exc_info=True)
        return False

def run_training_only():
    """Run only the preprocessing and model-training stages.

    Returns:
        bool: True on success, False if any step raised.  Failures are
        logged with the full traceback, matching run_complete_pipeline.
    """
    logger = setup_logging()
    logger.info("运行训练流程...")

    try:
        # Data loading and preprocessing; persist the fitted preprocessor.
        processor = DataProcessor()
        train_data, test_data = processor.load_data('../data/train.csv', '../data/test2.csv')
        X_train, X_test, y_train, y_test = processor.preprocess_data(train_data, test_data)
        processor.save_preprocessor('../models/preprocessor.pkl')

        # Base models, stacked meta-features, and the meta-model.
        trainer = ModelTrainer()
        trainer.create_base_models()
        base_results = trainer.train_base_models(X_train, y_train, X_test, y_test)

        meta_features = trainer.generate_meta_features(X_train, y_train)
        test_meta_features = trainer.generate_test_meta_features(X_train, y_train, X_test)
        meta_model = trainer.train_meta_model(meta_features, y_train, test_meta_features, y_test)

        trainer.save_training_results()
        trainer.plot_roc_curves(X_test, y_test)

        logger.info("训练完成！")
        return True

    except Exception as e:
        logger.error(f"训练过程中出现错误: {str(e)}")
        # Log the traceback too — previously only str(e) was recorded,
        # which made failures in this mode hard to diagnose.
        logger.error("详细错误信息:", exc_info=True)
        return False

def run_evaluation_only():
    """Run only the preprocessing and model-evaluation stages.

    Returns:
        bool: True on success, False if any step raised.  Failures are
        logged with the full traceback, matching run_complete_pipeline.
    """
    logger = setup_logging()
    logger.info("运行评估流程...")

    try:
        # Re-run preprocessing so the test split matches what the
        # persisted models were trained against.
        processor = DataProcessor()
        train_data, test_data = processor.load_data('../data/train.csv', '../data/test2.csv')
        X_train, X_test, y_train, y_test = processor.preprocess_data(train_data, test_data)

        # Load persisted models and evaluate them on the test split.
        evaluator = ModelEvaluator()
        evaluator.load_models()
        results = evaluator.evaluate_all_models(X_test, y_test)

        report_df = evaluator.generate_evaluation_report()
        evaluator.plot_confusion_matrices()
        evaluator.plot_roc_curves(X_test, y_test)

        logger.info("评估完成！")
        return True

    except Exception as e:
        logger.error(f"评估过程中出现错误: {str(e)}")
        # Log the traceback too — previously only str(e) was recorded,
        # which made failures in this mode hard to diagnose.
        logger.error("详细错误信息:", exc_info=True)
        return False

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='人才流失预测系统')
    parser.add_argument('--mode', choices=['full', 'train', 'eval'],
                       default='full', help='运行模式')

    args = parser.parse_args()

    # Dispatch table: each CLI mode maps to its pipeline entry point.
    # argparse's `choices` guarantees args.mode is one of these keys.
    runners = {
        'full': run_complete_pipeline,
        'train': run_training_only,
        'eval': run_evaluation_only,
    }
    success = runners[args.mode]()

    if success:
        print("\n✅ 运行成功！")
        sys.exit(0)
    else:
        print("\n❌ 运行失败！")
        sys.exit(1)