import pandas as pd
import os
import time
import torch
import wandb
import numpy as np
from sklearn.model_selection import train_test_split
# 导入数据加载相关模块
from training.data_loader import get_kfold_files, create_dataloaders, get_domain_files
# 导入训练器
from training.trainer import Trainer
# 导入工具函数
from training.utils.data_util import disable_benchmark_cudnn, get_optimal_num_workers
from training.utils.config_util import get_model, get_optimizer, get_loss_function, get_lr_scheduler
from training.utils.checkpoint_util import check_global_checkpoint, delete_global_checkpoint, is_fold_completed
from training.utils.common_util import set_random_seed
    

def _load_completed_fold_results(config, fold_id):
    """
    Reload the saved results of a fold that already finished training.

    Parses ``results.txt`` in the fold's save directory and, when present,
    the (target-)confusion-matrix CSV files.

    Args:
        config (dict): Full configuration; ``config['trainer']['save_dir']``
            locates the per-fold result directories.
        fold_id (int): Zero-based fold index.

    Returns:
        dict: accuracy / f1_score / kappa, per-class F1 scores, and optional
        confusion matrices plus target-domain metrics.
    """
    fold_dir = os.path.join(config['trainer']['save_dir'], f'fold_{fold_id}')
    results_file = os.path.join(fold_dir, 'results.txt')

    # Parse results.txt to recover the stored performance metrics.
    with open(results_file, 'r') as f:
        lines = f.readlines()

    # Headline metrics: accuracy, F1 score, Cohen's kappa.
    # NOTE(review): assumes the fixed "<name>: <value>" layout on lines 2-4
    # written by the trainer — confirm against Trainer's results.txt format.
    accuracy = float(lines[1].split(': ')[1].strip())
    f1_score = float(lines[2].split(': ')[1].strip())
    kappa = float(lines[3].split(': ')[1].strip())

    # Per-class F1 scores are assumed to start on line 7.
    per_class = {}
    for line in lines[6:]:
        if ':' not in line:
            continue
        name, _, value = line.partition(':')
        name = name.strip()
        # Target-domain summary lines also contain ':'; skip them here so
        # they do not leak into the per-class dict (parsed separately below).
        if name.startswith('Target'):
            continue
        try:
            per_class[name] = float(value.strip())
        except ValueError:
            # Non-numeric line (e.g. a section header) — ignore it.
            continue

    fold_results = {
        'accuracy': accuracy,
        'f1_score': f1_score,
        'kappa': kappa,
        'per_class': per_class
    }

    # Source-domain confusion matrix, if it was saved.
    cm_file = os.path.join(fold_dir, 'confusion_matrix.csv')
    if os.path.exists(cm_file):
        fold_results['confusion_matrix'] = np.loadtxt(cm_file, delimiter=',')

    # Target-domain confusion matrix and metrics (domain-adaptation runs).
    target_cm_file = os.path.join(fold_dir, 'target_confusion_matrix.csv')
    if os.path.exists(target_cm_file):
        fold_results['target_confusion_matrix'] = np.loadtxt(target_cm_file, delimiter=',')
        for line in lines:
            if 'Target Accuracy:' in line:
                fold_results['target_accuracy'] = float(line.split(': ')[1].strip())
            elif 'Target F1 Score:' in line:
                fold_results['target_f1_score'] = float(line.split(': ')[1].strip())
            elif 'Target Kappa:' in line:
                fold_results['target_kappa'] = float(line.split(': ')[1].strip())

    return fold_results


def train_fold(config, fold_id, train_files, val_files, domain_train_files=None, domain_val_files=None, load_checkpoint=False):
    """
    Train a single fold, optionally with domain adaptation.

    Args:
        config (dict): Configuration for model, trainer and data loaders.
        fold_id (int): Zero-based index of the current fold.
        train_files (list): Training file paths.
        val_files (list): Validation file paths.
        domain_train_files (list, optional): Target-domain training files.
        domain_val_files (list, optional): Target-domain validation files.
        load_checkpoint (bool, optional): Resume from a saved checkpoint.

    Returns:
        dict: Metrics of this fold (accuracy, F1 score, kappa, per-class
        F1 scores, and target-domain metrics when applicable).

    Raises:
        ValueError: If model/loss/optimizer/scheduler construction fails.
    """
    # Skip folds that already finished (resume support): reload their
    # stored results instead of retraining.
    if is_fold_completed(config, fold_id):
        print(f"Fold {fold_id + 1} already completed, skipping...")
        return _load_completed_fold_results(config, fold_id)

    # Optional pre-computed statistics file used by the data loaders.
    stats_file = config['data_loader'].get('stats_file', None)
    print(f"准备创建数据加载器，处理 {len(train_files)} 个训练文件和 {len(val_files)} 个验证文件")
    if stats_file:
        print(f"将使用预计算的统计文件: {stats_file}")

    # Pick a worker count suited to the machine.
    optimal_workers = get_optimal_num_workers()
    print(f"使用最佳worker数量: {optimal_workers}")

    # Source-domain loaders.  Reuse the stats_file fetched above instead of
    # re-reading it from the config (the original fetched it twice).
    train_loader, val_loader = create_dataloaders(
        train_files, val_files,
        batch_size=config['data_loader']['batch_size'],
        num_workers=optimal_workers,
        pin_memory=config['data_loader']['pin_memory'],
        stats_file=stats_file
    )

    # Target-domain loaders, only when domain-adaptation data is supplied.
    domain_train_loader = None
    domain_val_loader = None
    if domain_train_files is not None and domain_val_files is not None:
        print(f"为目标域数据创建加载器，处理 {len(domain_train_files)} 个训练文件和 {len(domain_val_files)} 个验证文件")
        domain_train_loader, domain_val_loader = create_dataloaders(
            domain_train_files, domain_val_files,
            batch_size=config['data_loader']['batch_size'],
            num_workers=optimal_workers,
            pin_memory=config['data_loader']['pin_memory'],
            stats_file=stats_file
        )

    # Build the core training components from the configuration.
    try:
        model = get_model(config)
        # Loss weights come from the training files, not a dataset object.
        criterion = get_loss_function(config, train_files=train_files)
        optimizer = get_optimizer(model, config)
        scheduler = get_lr_scheduler(optimizer, config)
    except ValueError as e:
        print(f"初始化组件失败: {e}")
        raise  # Abort training; the configuration is unusable.

    # Note the argument order: scheduler is keyword-only by convention here.
    trainer = Trainer(
        model,
        criterion,
        optimizer,
        config,
        fold_id,
        load_checkpoint=load_checkpoint,
        scheduler=scheduler
    )

    # Domain-adaptation runs pass the target-domain loaders to the trainer.
    if config.get('domain_adaptation', {}).get('enabled', False):
        fold_results = trainer.train(train_loader, val_loader, domain_train_loader, domain_val_loader)
    else:
        fold_results = trainer.train(train_loader, val_loader)

    # Report the fold's best metrics.
    print(f"Fold {fold_id + 1} Best Accuracy: {fold_results['accuracy']:.4f}")
    print(f"Fold {fold_id + 1} Best F1: {fold_results['f1_score']:.4f}")
    print(f"Fold {fold_id + 1} Best Kappa: {fold_results['kappa']:.4f}")

    return fold_results


def _metric_stats(fold_results, key):
    """Return (mean, std) of the scalar metric ``key`` across all folds."""
    values = [f[key] for f in fold_results]
    return np.mean(values), np.std(values)


def save_overall_results(config, fold_results, total_time):
    """
    Aggregate, print, log and persist the K-fold cross-validation results.

    Computes mean/std of the headline metrics and per-class F1 scores
    across folds, logs the summary to wandb, and writes two CSV files into
    the trainer's save directory: ``all_folds_results.csv`` (per-fold
    detail) and ``overall_results.csv`` (summary row).  Target-domain
    metrics are included when every fold reports them (domain adaptation).

    Args:
        config (dict): Configuration; ``config['trainer']['save_dir']``
            is where the CSV files are written.
        fold_results (list): One result dict per fold (must be non-empty).
        total_time (float): Total training time in seconds.
    """
    print("\n===== K-Fold Cross Validation Results =====")
    acc_mean, acc_std = _metric_stats(fold_results, 'accuracy')
    f1_mean, f1_std = _metric_stats(fold_results, 'f1_score')
    kappa_mean, kappa_std = _metric_stats(fold_results, 'kappa')
    overall_metrics = {
        'accuracy': acc_mean,
        'accuracy_std': acc_std,
        'f1_score': f1_mean,
        'f1_std': f1_std,
        'kappa': kappa_mean,
        'kappa_std': kappa_std
    }

    # Per-class F1 mean/std.  Class names are taken from the first fold;
    # assumes every fold reports the same class set — TODO confirm.
    classes = fold_results[0]['per_class'].keys()
    per_class_avg = {
        cls: np.mean([f['per_class'][cls] for f in fold_results])
        for cls in classes
    }
    per_class_std = {
        cls: np.std([f['per_class'][cls] for f in fold_results])
        for cls in classes
    }

    # Source-domain summary (mean ± std).
    print(f"Source Domain Results:")
    print(f"Mean Accuracy: {overall_metrics['accuracy']:.4f} ± {overall_metrics['accuracy_std']:.4f}")
    print(f"Mean F1 Score: {overall_metrics['f1_score']:.4f} ± {overall_metrics['f1_std']:.4f}")
    print(f"Mean Kappa: {overall_metrics['kappa']:.4f} ± {overall_metrics['kappa_std']:.4f}")

    print("\nPer Class Mean F1 Scores:")
    for cls in classes:
        print(f"{cls}: {per_class_avg[cls]:.4f} ± {per_class_std[cls]:.4f}")

    # A target-domain summary is only meaningful when every fold has one.
    has_target_results = all('target_accuracy' in f for f in fold_results)
    target_metrics = None

    if has_target_results:
        t_acc_mean, t_acc_std = _metric_stats(fold_results, 'target_accuracy')
        t_f1_mean, t_f1_std = _metric_stats(fold_results, 'target_f1_score')
        t_kappa_mean, t_kappa_std = _metric_stats(fold_results, 'target_kappa')
        target_metrics = {
            'target_accuracy': t_acc_mean,
            'target_accuracy_std': t_acc_std,
            'target_f1_score': t_f1_mean,
            'target_f1_std': t_f1_std,
            'target_kappa': t_kappa_mean,
            'target_kappa_std': t_kappa_std
        }

        print("\n===== Target Domain Results =====")
        print(f"Mean Accuracy: {target_metrics['target_accuracy']:.4f} ± {target_metrics['target_accuracy_std']:.4f}")
        print(f"Mean F1 Score: {target_metrics['target_f1_score']:.4f} ± {target_metrics['target_f1_std']:.4f}")
        print(f"Mean Kappa: {target_metrics['target_kappa']:.4f} ± {target_metrics['target_kappa_std']:.4f}")

    # Final summary metrics for wandb.
    log_dict = {
        "final/mean_accuracy": overall_metrics['accuracy'],
        "final/mean_f1": overall_metrics['f1_score'],
        "final/mean_kappa": overall_metrics['kappa']
    }
    if has_target_results:
        log_dict.update({
            "final/target_mean_accuracy": target_metrics['target_accuracy'],
            "final/target_mean_f1": target_metrics['target_f1_score'],
            "final/target_mean_kappa": target_metrics['target_kappa']
        })
    wandb.log(log_dict)

    # Persist results as CSV.  (The original wrapped save_dir in a no-op
    # single-argument os.path.join — removed.)
    results_dir = config['trainer']['save_dir']
    os.makedirs(results_dir, exist_ok=True)

    # Per-fold detail: one row per fold, per-class F1 scores flattened in.
    fold_df = pd.DataFrame([
        {
            'fold': i,
            'accuracy': f['accuracy'],
            'f1_score': f['f1_score'],
            'kappa': f['kappa'],
            **f['per_class']
        }
        for i, f in enumerate(fold_results)
    ])
    fold_df.to_csv(os.path.join(results_dir, 'all_folds_results.csv'), index=False)

    # Summary row: mean/std of every metric plus total runtime.
    overall_data = {
        'mean_accuracy': overall_metrics['accuracy'],
        'accuracy_std': overall_metrics['accuracy_std'],
        'mean_f1': overall_metrics['f1_score'],
        'f1_std': overall_metrics['f1_std'],
        'mean_kappa': overall_metrics['kappa'],
        'kappa_std': overall_metrics['kappa_std'],
        **{f'{cls}_mean': per_class_avg[cls] for cls in classes},
        **{f'{cls}_std': per_class_std[cls] for cls in classes},
        'total_time': total_time
    }
    if has_target_results:
        overall_data.update({
            'target_mean_accuracy': target_metrics['target_accuracy'],
            'target_accuracy_std': target_metrics['target_accuracy_std'],
            'target_mean_f1': target_metrics['target_f1_score'],
            'target_f1_std': target_metrics['target_f1_std'],
            'target_mean_kappa': target_metrics['target_kappa'],
            'target_kappa_std': target_metrics['target_kappa_std']
        })

    overall_df = pd.DataFrame([overall_data])
    overall_df.to_csv(os.path.join(results_dir, 'overall_results.csv'), index=False)



def main(config):
    """
    Entry point: run K-fold cross-validation training end to end.

    Args:
        config (dict): Full configuration with model, trainer, data-loader
            and (optional) domain-adaptation settings.
    """
    # Seed all RNGs for reproducibility (default 42).
    seed = config.get('random_seed', 42)
    set_random_seed(seed)

    # Keep cuDNN deterministic: benchmark mode stays disabled.
    disable_benchmark_cudnn()

    use_domain_adaptation = config.get('domain_adaptation', {}).get('enabled', False)

    # Hoist the data-loader settings shared by both branches.
    loader_cfg = config['data_loader']
    max_files = loader_cfg.get('max_files')
    add_random = loader_cfg.get('add_random', False)

    target_train_files = None
    target_val_files = None
    if use_domain_adaptation:
        # Domain adaptation: source domain is K-folded, target domain gets
        # a single train/val split.
        print("启用领域适应训练模式")
        _, target_files = get_domain_files(
            loader_cfg['source_data_dir'],
            loader_cfg['target_data_dir'],
            max_files_source=max_files,
            max_files_target=max_files,
            add_random=add_random
        )

        folds = get_kfold_files(
            loader_cfg['source_data_dir'],
            loader_cfg['num_folds'],
            max_files=max_files,
            add_random=add_random
        )

        target_train_files, target_val_files = train_test_split(
            target_files, test_size=0.2, random_state=seed
        )
        print(f"目标域数据划分: {len(target_train_files)} 训练样本, {len(target_val_files)} 验证样本")
    else:
        # Plain K-fold cross-validation on a single data directory.
        print("使用常规K折交叉验证模式")
        folds = get_kfold_files(
            loader_cfg['data_dir'],
            loader_cfg['num_folds'],
            max_files=max_files,
            add_random=add_random
        )

    fold_results = []
    start_time = time.time()

    # wandb table summarizing every fold's metrics.
    table_columns = ["Fold", "Source Accuracy", "Source F1 Score", "Source Kappa"]
    if use_domain_adaptation:
        table_columns.extend(["Target Accuracy", "Target F1 Score", "Target Kappa"])
    metrics_table = wandb.Table(columns=table_columns)

    # Resume support: a global checkpoint tells us which fold to restart at.
    start_fold, _ = check_global_checkpoint(config)
    first_fold = 0 if start_fold is None else start_fold

    for fold_id in range(first_fold, len(folds)):
        train_files, val_files = folds[fold_id]
        print(f"\n===== Fold {fold_id + 1}/{config['data_loader']['num_folds']} =====")

        # Only the resumed fold reloads its checkpoint.
        resume = start_fold is not None and fold_id == start_fold

        if use_domain_adaptation:
            results = train_fold(
                config, fold_id, train_files, val_files,
                target_train_files, target_val_files,
                load_checkpoint=resume
            )
            # Source metrics plus target metrics (0 when absent).
            row = (
                fold_id + 1,
                results['accuracy'],
                results['f1_score'],
                results['kappa'],
                results.get('target_accuracy', 0),
                results.get('target_f1_score', 0),
                results.get('target_kappa', 0),
            )
        else:
            results = train_fold(config, fold_id, train_files, val_files, load_checkpoint=resume)
            row = (fold_id + 1, results['accuracy'], results['f1_score'], results['kappa'])

        metrics_table.add_data(*row)
        fold_results.append(results)

    # Publish the per-fold summary table once all folds are done.
    wandb.log({"final/fold_metrics_summary": metrics_table})

    # Report wall-clock training time as HH:MM:SS.ss.
    total_time = time.time() - start_time
    hours, rem = divmod(total_time, 3600)
    minutes, seconds = divmod(rem, 60)
    print(f"\nTotal training time: {int(hours):02d}:{int(minutes):02d}:{seconds:.2f}")

    # Aggregate and persist the cross-validation results.
    save_overall_results(config, fold_results, total_time)

    # Clean up the resume checkpoint now that the run finished.
    delete_global_checkpoint(config)

    # Close out the wandb run.
    wandb.finish()
