import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from sklearn.metrics import precision_recall_fscore_support
import csv
import datetime
import os
import shutil
import sys
from models.full_model import FullModel
from datasets.datasets_class import PairedNiftiDataset
from losses.supcon_loss import SupConLoss
from datasets.fold_split import get_fold_file_lists 
from config import *
from utils import plot_training_curves, plot_confusion_matrix, plot_roc_curve

# Make the project root importable (absolute path of this file's directory).
# NOTE(review): this runs *after* the project-local imports above (models,
# datasets, losses, config, utils), so it cannot be what makes those imports
# resolve — confirm whether it should be moved before them, or whether it is
# only needed for modules imported later at runtime.
project_root = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, project_root)

def create_experiment_dir():
    """Create a timestamped experiment directory under ./runs and return its path.

    The directory name encodes the current time down to the minute; calling
    this twice within the same minute reuses the same directory (exist_ok).
    """
    stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M")
    run_dir = os.path.join("./runs", f"experiment_{stamp}")
    os.makedirs(run_dir, exist_ok=True)
    return run_dir

if __name__ == "__main__":
    # 1. Hyperparameters (device, batch_size, lr, epochs, temp, num_folds,
    #    brain_root, hippocampus_root) are brought in by `from config import *`.
    #    The original no-op self-assignments (`batch_size = batch_size`, ...)
    #    have been dropped; only the real CUDA fallback remains.
    device = device if torch.cuda.is_available() else 'cpu'
    num_classes = 3  # three-way classification (classes 0/1/2, see CSV header)

    # 2. Build the k-fold train/validation file-list splits.
    folds = get_fold_file_lists(brain_root, hippocampus_root, n_splits=num_folds)

    # Each run gets its own timestamped directory for logs and checkpoints.
    experiment_dir = create_experiment_dir()
    log_file = os.path.join(experiment_dir, "train_log.csv")

    # Write the CSV header once; per-epoch rows are appended during training.
    with open(log_file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["Fold", "Epoch", "Train Loss", "Train Accuracy", 
                        "Val Loss", "Val Accuracy", 
                        "Precision", "Recall", "F1 Score", 
                        "Class 0 Precision", "Class 0 Recall", "Class 0 F1",
                        "Class 1 Precision", "Class 1 Recall", "Class 1 F1",
                        "Class 2 Precision", "Class 2 Recall", "Class 2 F1"])

    # ---- Cross-validation: one full train/validate cycle per fold ----
    for fold, fold_data in enumerate(folds):
        train_b = fold_data['train_brain_files']
        train_h = fold_data['train_hipp_files']
        train_y = fold_data['train_labels']
        val_b = fold_data['val_brain_files']
        val_h = fold_data['val_hipp_files']
        val_y = fold_data['val_labels']
        print(f"\n=== Fold {fold + 1}/{num_folds} ===")

        # Per-fold checkpoint directory; only the best model (highest
        # validation accuracy) is kept as best.pth.
        checkpoint_dir = os.path.join(experiment_dir, f"model_checkpoints_fold_{fold + 1}")
        os.makedirs(checkpoint_dir, exist_ok=True)
        best_model_path = os.path.join(checkpoint_dir, "best.pth")

        # 3. Paired (whole-brain, hippocampus) datasets and loaders for this fold.
        train_dataset = PairedNiftiDataset(
            brain_files=train_b,
            hipp_files=train_h,
            labels=train_y
        )

        val_dataset = PairedNiftiDataset(
            brain_files=val_b,
            hipp_files=val_h,
            labels=val_y
        )

        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)

        # 4. Fresh model, losses and optimizer per fold so folds stay independent.
        model = FullModel().to(device)
        contrastive_criterion = SupConLoss().to(device)
        classification_criterion = nn.CrossEntropyLoss().to(device)
        optimizer = optim.Adam(model.parameters(), lr=lr)

        best_val_acc = 0.0  # best validation accuracy seen so far in this fold

        # 5. Training
        for epoch in range(epochs):
            print(f"\n[Fold {fold + 1}] Epoch {epoch + 1}/{epochs}")
            # BUGFIX: switch back to train mode at the start of every epoch.
            # model.train() used to be called once before the epoch loop, so
            # after the first validation pass (model.eval() below) every later
            # epoch trained with dropout disabled and batchnorm statistics frozen.
            model.train()
            total_correct = 0
            total_samples = 0
            total_loss = 0.0

            # Accumulate predictions/labels across batches for epoch-level metrics.
            all_preds = []
            all_labels = []

            loop = tqdm(train_loader, desc="Training", leave=False)
            for b, h, labels in loop:
                b, h, labels = b.to(device), h.to(device), labels.to(device)

                optimizer.zero_grad()
                projections, logits = model(b, h)

                # SupConLoss expects [2B, D]; the single projection is duplicated
                # to act as both "views". NOTE(review): with identical views each
                # sample is a trivial positive of its own copy — confirm this is
                # the intended contrastive setup (vs. two augmented views).
                features = torch.cat([projections, projections], dim=0)
                contrastive_labels = torch.cat([labels, labels], dim=0)

                loss_contrast = contrastive_criterion(features, contrastive_labels)
                loss_class = classification_criterion(logits, labels)
                # `temp` acts as the mixing weight between the contrastive and
                # classification terms here (despite its temperature-like name).
                hybrid_loss = temp * loss_contrast + (1 - temp) * loss_class

                hybrid_loss.backward()
                optimizer.step()

                _, preds = torch.max(logits, dim=1)
                total_correct += (preds == labels).sum().item()
                total_samples += labels.size(0)
                total_loss += hybrid_loss.item()

                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

                loop.set_postfix({
                    "SupCon": f"{loss_contrast.item():.4f}",
                    "CE": f"{loss_class.item():.4f}",
                    "Hybrid": f"{hybrid_loss.item():.4f}"
                })

            acc = total_correct / total_samples
            avg_loss = total_loss / len(train_loader)

            # Epoch-level training metrics: macro averages plus per-class arrays.
            precision, recall, f1, _ = precision_recall_fscore_support(
                all_labels, all_preds, 
                average='macro', 
                zero_division=0
            )
            class_metrics = precision_recall_fscore_support(
                all_labels, all_preds, 
                labels=[0, 1, 2], 
                average=None, 
                zero_division=0
            )

            print(f"[Fold {fold+1}] Train Loss: {avg_loss:.4f}, Train Accuracy: {acc:.4f}")

            # 6. Validation
            model.eval()
            val_correct = 0
            val_samples = 0
            val_loss = 0.0
            val_all_preds = []
            val_all_labels = []

            with torch.no_grad():
                for b, h, labels in val_loader:
                    b, h, labels = b.to(device), h.to(device), labels.to(device)
                    projections, logits = model(b, h)

                    # Same hybrid loss as training so train/val losses are comparable.
                    features = torch.cat([projections, projections], dim=0)
                    contrastive_labels = torch.cat([labels, labels], dim=0)
                    loss_contrast = contrastive_criterion(features, contrastive_labels)
                    loss_class = classification_criterion(logits, labels)
                    hybrid_loss = temp * loss_contrast + (1 - temp) * loss_class
                    val_loss += hybrid_loss.item()

                    preds = torch.argmax(logits, dim=1)
                    val_correct += (preds == labels).sum().item()
                    val_samples += labels.size(0)

                    val_all_preds.extend(preds.cpu().numpy())
                    val_all_labels.extend(labels.cpu().numpy())

            val_acc = val_correct / val_samples
            val_loss /= len(val_loader)

            # Validation classification metrics.
            # NOTE(review): these are computed but never written to the CSV row
            # below — only the *training* macro/per-class metrics are logged.
            # Confirm whether the "Precision/Recall/F1" columns were meant to
            # hold validation metrics instead.
            val_precision, val_recall, val_f1, _ = precision_recall_fscore_support(
                val_all_labels, val_all_preds, 
                average='macro', 
                zero_division=0
            )
            val_class_metrics = precision_recall_fscore_support(
                val_all_labels, val_all_preds, 
                labels=[0, 1, 2], 
                average=None, 
                zero_division=0
            )

            print(f"[Fold {fold+1}] Val Loss: {val_loss:.4f}, Val Accuracy: {val_acc:.4f}")

            # Checkpoint whenever validation accuracy improves.
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                torch.save(model.state_dict(), best_model_path)
                print(f"Best model saved to {best_model_path}")

            # Append this epoch's results to the CSV log.
            with open(log_file, 'a', newline='') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow([
                    fold + 1, epoch + 1, avg_loss, acc, val_loss, val_acc,
                    precision, recall, f1,
                    # class_metrics is (precision[3], recall[3], f1[3], support);
                    # columns are grouped per class: P, R, F1 for classes 0..2.
                    class_metrics[0][0], class_metrics[1][0], class_metrics[2][0],
                    class_metrics[0][1], class_metrics[1][1], class_metrics[2][1],
                    class_metrics[0][2], class_metrics[1][2], class_metrics[2][2]
                ])
    
    # ---- After all folds: regenerate plots from each fold's best checkpoint ----
    for fold in range(num_folds):
        checkpoint_dir = os.path.join(experiment_dir, f"model_checkpoints_fold_{fold + 1}")
        best_model_path = os.path.join(checkpoint_dir, "best.pth")

        # Robustness: a fold could in principle finish without ever writing
        # best.pth (validation accuracy never exceeded 0.0); skip it instead
        # of crashing inside torch.load.
        if not os.path.exists(best_model_path):
            print(f"[Fold {fold + 1}] No checkpoint at {best_model_path}; skipping plots")
            continue

        # Rebuild the model and load the best weights for this fold.
        # map_location keeps loading robust if the checkpoint was written on a
        # different device than the one available now (e.g. GPU -> CPU).
        model = FullModel().to(device)
        model.load_state_dict(torch.load(best_model_path, map_location=device))
        # BUGFIX: a freshly constructed module defaults to train mode; switch
        # to eval so dropout/batchnorm behave deterministically while the plot
        # helpers run inference on the validation set.
        model.eval()

        # Rebuild this fold's validation loader.
        fold_data = folds[fold]
        val_dataset = PairedNiftiDataset(
            brain_files=fold_data['val_brain_files'],
            hipp_files=fold_data['val_hipp_files'],
            labels=fold_data['val_labels']
        )
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)

        # Emit the per-fold training curves, confusion matrix and ROC curve.
        plot_training_curves(experiment_dir, fold + 1)
        plot_confusion_matrix(model, val_loader, device, experiment_dir, fold + 1)
        plot_roc_curve(model, val_loader, device, experiment_dir, fold + 1)