import os
import numpy as np
import torch
import torch.multiprocessing as mp

from src.config_loader import ConfigLoader
from src.logger import Logger
from src.data_processor import DataProcessor
from src.model_manager import ModelManager
from src.evaluator import Evaluator


def run_pipeline(rank: int = 0, world_size: int = 1):
    """Run the full modeling pipeline; supports distributed training.

    Args:
        rank: Process rank (0 = main process). When launched via
            ``mp.spawn`` this is supplied automatically as the first
            positional argument.
        world_size: Total number of processes participating in training.

    Side effects:
        Sets MASTER_ADDR/MASTER_PORT/LOCAL_RANK environment variables
        when ``world_size > 1``, writes logs, trains and persists a model,
        and evaluates it on the test set (main process only).
    """
    # Environment variables for torch.distributed rendezvous
    # (only needed when more than one process participates).
    if world_size > 1:
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = '12355'
        os.environ['LOCAL_RANK'] = str(rank)

    # 1. Load configuration
    config = ConfigLoader('config/pipeline_config.yaml')

    # 2. Initialize logging (distinct logger name for worker processes)
    if rank == 0:
        logger = Logger.quick_init(name="price_prediction")
        logger.info("===== 启动企业级建模流水线 =====")
    else:
        logger = Logger.quick_init(name="price_prediction_worker")

    # 3. Initialize the data processor
    data_processor = DataProcessor(config, logger)

    # 4. Load and preprocess data
    logger.info("开始数据处理流程")
    df, x, y = data_processor.load_data()

    # Check whether historical data exists for drift detection.
    # NOTE: the previous bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt and hid genuine bugs; narrow it to Exception
    # and surface the reason in the log.
    try:
        prev_df, _, _ = data_processor.load_data(version="prev_version")
        drift = data_processor.check_drift(prev_df, df)
        if drift and rank == 0:
            logger.warning(f"检测到数据漂移，建议重新训练模型: {drift}")
    except Exception as e:
        logger.info(f"没有找到历史数据，跳过漂移检测: {e}")

    # Fetch the distributed flag once and reuse it (it was previously
    # read from config twice).
    distributed = config.get('training.distributed', False)

    x_train, x_val, x_test, y_train, y_val, y_test = data_processor.split_data(
        x, y, distributed=distributed
    )
    x_train_balanced, y_train_balanced = data_processor.balance_data(x_train, y_train)

    # Optionally augment every split BEFORE fitting the scaler
    # (augmentation changes the feature dimensionality, so the scaler
    # must be fitted on the augmented shape).
    if config.get('data.preprocess.augmentation.enable', False):
        x_train_balanced = data_processor.data_augmentation(
            torch.tensor(x_train_balanced, dtype=torch.float32)
        ).numpy()
        x_val = data_processor.data_augmentation(
            torch.tensor(x_val, dtype=torch.float32)
        ).numpy()
        x_test = data_processor.data_augmentation(
            torch.tensor(x_test, dtype=torch.float32)
        ).numpy()

    # Standardize after augmentation so the scaler sees the final feature set.
    x_train_proc, x_val_proc, x_test_proc = data_processor.preprocess(
        x_train_balanced, x_val, x_test
    )

    # 5. Create datasets; a DistributedSampler is returned when distributed.
    train_set, val_set, test_set, train_sampler = data_processor.create_datasets(
        x_train_proc, x_val_proc, x_test_proc,
        y_train_balanced, y_val, y_test,
        distributed=distributed
    )

    # 6. Compute class weights (multi-task capable).
    # Simplified: every task currently shares the same training labels —
    # TODO confirm per-task labels when real multi-task data lands.
    tasks = config.get('data.tasks', ["price_range"])
    class_weights = {
        task: data_processor.compute_class_weights(y_train) for task in tasks
    }

    # 7. Determine output dimensions (simplified: shared label space).
    output_dims = {task: len(np.unique(y)) for task in tasks}

    # 8. Build and train the model
    logger.info("开始模型训练流程")
    model_manager = ModelManager(config, data_processor, logger)
    input_dim = x_train_proc.shape[1]

    # In distributed mode only the main process builds/trains/evaluates here.
    if not distributed or rank == 0:
        model_manager.build_model(input_dim, output_dims)

        best_score, best_auc = model_manager.train(
            train_set, val_set,
            class_weights,
            train_sampler=train_sampler
        )
        logger.info(f"模型训练完成 - 最佳验证评分: {best_score:.4f}, 最佳验证AUC: {best_auc:.4f}")

        # 9. Evaluate the best checkpoint on the held-out test set.
        logger.info("开始模型评估流程")
        evaluator = Evaluator(config, model_manager, data_processor, logger)
        model_manager.load_model(input_dim, output_dims)  # reload best weights
        test_acc, test_auc = evaluator.evaluate(
            test_set,
            y_test,
            config.get('data.features')
        )

        logger.info(f"流水线完成 - 测试集准确率: {test_acc:.4f}, 测试集AUC: {test_auc:.4f}")
        logger.info("===== 企业级建模流水线结束 =====")


def main():
    """Entry point: dispatch to single-process or distributed training.

    Reads the pipeline config once to decide whether to spawn
    ``world_size`` worker processes or run everything in-process.
    """
    config = ConfigLoader('config/pipeline_config.yaml')
    distributed = config.get('training.distributed', False)
    world_size = config.get('training.world_size', 1)

    # Guard clause: anything that isn't true multi-process distributed
    # training runs in the current process.
    if not distributed or world_size <= 1:
        run_pipeline()
        return

    # mp.spawn passes the process rank as the first positional argument,
    # so each worker receives run_pipeline(rank, world_size).
    mp.spawn(run_pipeline, args=(world_size,), nprocs=world_size, join=True)


if __name__ == '__main__':
    # Supports multi-process training; uncomment to force the 'spawn'
    # start method if the platform default causes issues with CUDA.
    # mp.set_start_method('spawn')
    main()