import pandas as pd
import numpy as np
import config
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
from tqdm import tqdm
import torch
from torch.utils.data import TensorDataset, DataLoader
import time


matplotlib.rcParams['font.family'] = 'SimHei'  # SimHei (a CJK font) so Chinese plot labels render
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly when a CJK font is active
# Shared device for every tensor/model in this module (GPU if available).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def change_df_sec(ser, reference_time=None):
    """
    Convert a datetime-like series to elapsed seconds from a reference time.

    Args:
        ser (pd.Series): datetime-like values (strings or timestamps); parsed
            with ``pd.to_datetime``.
        reference_time (optional): the zero point. When ``None``, the earliest
            timestamp in ``ser`` is used, so the minimum maps to 0.0.

    Returns:
        pd.Series: float seconds elapsed since ``reference_time``.
    """
    timestamps = pd.to_datetime(ser)
    if reference_time is None:
        reference_time = timestamps.min()
    return (timestamps - reference_time).dt.total_seconds()


def create_time_seq(X_scaled, y, time_steps=config.TIME_STEPS):
    """
    Build fixed-length sliding windows over the feature rows.

    For each position a window of ``time_steps`` consecutive rows is paired
    with the label that follows it. Positions whose window would run past the
    end of the data instead reuse the trailing ``time_steps`` rows and their
    own label, so the output always has exactly ``len(X_scaled)`` samples.

    Args:
        X_scaled: already-normalized feature rows (array-like, 2-D).
        y: labels aligned with the rows of ``X_scaled``.
        time_steps (int, optional): window length. Defaults to
            config.TIME_STEPS.

    Returns:
        tuple[np.ndarray, np.ndarray]: windows of shape
        (len(X_scaled), time_steps, n_features) and their labels.
    """
    n = len(X_scaled)
    windows = []
    targets = []
    for start in range(n):
        end = start + time_steps
        if end < n:
            # Normal case: window [start, end) predicts the label at `end`.
            windows.append(X_scaled[start:end])
            targets.append(y[end])
        else:
            # Tail case: reuse the last full window ending at `start`.
            windows.append(X_scaled[start - time_steps + 1:start + 1])
            targets.append(y[start])
    return np.array(windows), np.array(targets)


def evaluate_model_on_test(model,
                           test_loader,
                           train_losses, test_losses,
                           train_accuracies, test_accuracies
                           ):
    """
    Plot training/test curves, then evaluate the model on the test set.

    Args:
        model (torch.nn.Module): trained classifier; its forward pass must
            return per-class logits (argmax over dim 1 is the prediction).
        test_loader (DataLoader): test batches of (X, y).
            NOTE(review): batches are used as-is, so they must already live
            on the model's device (process_before_train builds such loaders).
        train_losses (list): per-epoch training losses to plot.
        test_losses (list): per-epoch test losses to plot.
        train_accuracies (list): per-epoch training accuracies (%).
        test_accuracies (list): per-epoch test accuracies (%).

    Returns:
        float: overall test accuracy as a FRACTION in [0, 1] -- note the
            other evaluation helpers in this file return percentages.
    """
    # Plot loss and accuracy curves side by side
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.plot(train_losses, label='Train Loss')
    plt.plot(test_losses, label='Test Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training and Test Loss')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(train_accuracies, label='Train Accuracy')
    plt.plot(test_accuracies, label='Test Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Training and Test Accuracy')
    plt.legend()
    plt.tight_layout()
    plt.show()

    # Evaluate the model on the test set
    model.eval()
    all_preds = []
    all_labels = []

    with torch.no_grad():
        for X_batch, y_batch in test_loader:
            outputs = model(X_batch)
            _, predicted = torch.max(outputs.data, 1)
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(y_batch.cpu().numpy())

    # Print confusion matrix and classification report
    print("Confusion Matrix:")
    conf_matrix = confusion_matrix(all_labels, all_preds)
    print(conf_matrix)

    print("\nClassification Report:")
    print(classification_report(all_labels, all_preds))

    # Overall accuracy (fraction, not percent)
    accuracy = np.mean(np.array(all_preds) == np.array(all_labels))
    return accuracy


def evaluate_model(model, X, y, time_steps=config.TIME_STEPS, batch_size=config.BATCH_SIZE, save_path=None):
    """
    Evaluate a PyTorch model on time series data and generate evaluation plots.

    X is min-max scaled, turned into sliding windows of length ``time_steps``,
    randomly split 80/20 (seed 42), and only the 20% test portion is
    evaluated -- the 80% train portion is discarded here.

    NOTE(review): the scaler is fitted on ALL of X before the split, so the
    held-out portion influences the scaling statistics (mild leakage) --
    confirm this matches how the model was trained.

    Parameters:
    -----------
    model : torch.nn.Module
        The PyTorch model to evaluate (must be already trained). Inference
        runs on the module-level ``device``; inputs are moved there below.
    X : numpy.ndarray
        Feature data, shape (n_samples, n_features).
    y : numpy.ndarray
        Target labels (integer class indices).
    time_steps : int, default=config.TIME_STEPS
        Number of time steps per sequence window.
    batch_size : int, default=config.BATCH_SIZE
        Batch size for evaluation.
    save_path : str, optional
        Path to save the evaluation plot (None for no saving).

    Returns:
    --------
    dict
        Keys: "accuracy" (fraction in [0, 1]), "confusion_matrix",
        "classification_report", "all_labels", "all_predictions",
        "all_probabilities".
    """

    # Set model to evaluation mode
    model.eval()
    all_preds = []
    all_labels = []
    all_probs = []  # Store prediction probabilities for ROC curve

    # Scale the data
    scaler = MinMaxScaler()
    X_scaled = scaler.fit_transform(X)

    # Create time series data (unlike create_time_seq, trailing windows are
    # dropped, so this yields len(X) - time_steps samples)
    sequences = []
    labels = []
    for i in range(len(X_scaled) - time_steps):
        sequences.append(X_scaled[i:i+time_steps])
        labels.append(y[i+time_steps])

    X_seq = np.array(sequences)
    y_seq = np.array(labels)
    X_train, X_test, y_train, y_test = train_test_split(
        X_seq, y_seq, test_size=0.2, random_state=42)

    # Convert to PyTorch tensors and move to the module-level device
    X_test = torch.tensor(X_test, dtype=torch.float32).to(device)
    y_test = torch.tensor(y_test, dtype=torch.long).to(device)

    # Create test data loader
    test_dataset = TensorDataset(X_test, y_test)
    test_loader = DataLoader(
        test_dataset, batch_size=batch_size, shuffle=False)

    # Use tqdm to show progress
    progress_bar = tqdm(test_loader, desc="Testing")
    correct = 0
    total = 0

    with torch.no_grad():
        for X_batch, y_batch in progress_bar:
            outputs = model(X_batch)
            _, predicted = torch.max(outputs.data, 1)

            # Calculate batch accuracy for tqdm display
            batch_correct = (predicted == y_batch).sum().item()
            batch_total = y_batch.size(0)
            correct += batch_correct
            total += batch_total

            # Update progress bar with current accuracy
            progress_bar.set_postfix(accuracy=f"{100 * correct / total:.2f}%")

            # Store predictions and probabilities
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(y_batch.cpu().numpy())

            # Check for NaN values and replace them with zeros so softmax
            # stays finite (NaN logits would poison the ROC/PR curves)
            valid_outputs = torch.where(torch.isnan(
                outputs), torch.zeros_like(outputs), outputs)
            all_probs.extend(torch.softmax(valid_outputs, dim=1).cpu().numpy())

    # Convert to numpy arrays for metrics calculation
    all_preds = np.array(all_preds)
    all_labels = np.array(all_labels)
    all_probs = np.array(all_probs)

    # Print confusion matrix and classification report
    print("Confusion Matrix:")
    conf_matrix = confusion_matrix(all_labels, all_preds)
    print(conf_matrix)

    print("\nClassification Report:")
    report = classification_report(all_labels, all_preds)
    print(report)

    # Create a figure with multiple subplots
    plt.figure(figsize=(20, 15))

    # Plot confusion matrix
    plt.subplot(2, 2, 1)
    n_classes = len(np.unique(all_labels))
    class_labels = [f'Class {i}' for i in range(n_classes)]
    sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues',
                xticklabels=class_labels, yticklabels=class_labels)
    plt.title('混淆矩阵')
    plt.xlabel('预测标签')
    plt.ylabel('真实标签')

    # Plot ROC curve (one-vs-rest)
    plt.subplot(2, 2, 2)
    for i in range(n_classes):
        if i < all_probs.shape[1]:  # Check if class exists in predictions
            # Calculate ROC curve and ROC area for each class
            fpr, tpr, _ = roc_curve(all_labels == i, all_probs[:, i])
            roc_auc = auc(fpr, tpr)
            plt.plot(fpr, tpr, lw=2, label=f'类别 {i} (AUC = {roc_auc:.2f})')

    plt.plot([0, 1], [0, 1], 'k--', lw=2)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('假阳性率')
    plt.ylabel('真阳性率')
    plt.title('ROC曲线')
    plt.legend(loc="lower right")

    # Plot precision-recall curve
    plt.subplot(2, 2, 3)
    for i in range(n_classes):
        if i < all_probs.shape[1]:  # Check if class exists in predictions
            precision, recall, _ = precision_recall_curve(
                all_labels == i, all_probs[:, i])
            avg_precision = average_precision_score(
                all_labels == i, all_probs[:, i])
            plt.plot(recall, precision, lw=2,
                     label=f'类别 {i} (AP = {avg_precision:.2f})')

    plt.xlabel('召回率')
    plt.ylabel('精确率')
    plt.title('精确率-召回率曲线')
    plt.legend(loc="best")

    # Class distribution
    plt.subplot(2, 2, 4)
    class_counts = np.bincount(all_labels)
    sns.barplot(x=np.arange(len(class_counts)), y=class_counts)
    plt.title('测试集类别分布')
    plt.xlabel('类别')
    plt.ylabel('样本数量')

    plt.tight_layout()
    if save_path:
        plt.savefig(save_path, dpi=300)
    plt.show()

    # Calculate overall accuracy
    accuracy = correct / total

    # Return evaluation metrics
    return {
        "accuracy": accuracy,
        "confusion_matrix": conf_matrix,
        "classification_report": report,
        "all_labels": all_labels,
        "all_predictions": all_preds,
        "all_probabilities": all_probs
    }


def evaluate_model_on_test_cv(model, test_loader, train_losses, val_losses, train_accuracies, val_accuracies, save_path=None):
    """
    Evaluate a cross-validation-trained model on the test set.

    Plots mean +/- std learning curves aggregated over folds, then runs the
    model on ``test_loader`` and reports accuracy, confusion matrix,
    classification report and (when there are at most 10 classes) per-class
    one-vs-rest ROC and precision-recall curves.

    Args:
        model (torch.nn.Module): trained model; its forward pass returns
            per-class logits.
        test_loader (DataLoader): test batches, used as-is.
            NOTE(review): batches must already be on the model's device
            (process_before_train builds such loaders).
        train_losses (list): per-fold training losses, shape [n_folds, n_epochs].
        val_losses (list): per-fold validation losses, shape [n_folds, n_epochs].
        train_accuracies (list): per-fold training accuracies, shape [n_folds, n_epochs].
        val_accuracies (list): per-fold validation accuracies, shape [n_folds, n_epochs].
        save_path (str, optional): path prefix for the saved figures.

    Returns:
        float: test-set accuracy in PERCENT (0-100).
    """

    # Per-epoch mean and standard deviation of each metric across folds
    mean_train_losses = np.mean(train_losses, axis=0)
    std_train_losses = np.std(train_losses, axis=0)
    mean_val_losses = np.mean(val_losses, axis=0)
    std_val_losses = np.std(val_losses, axis=0)

    mean_train_accuracies = np.mean(train_accuracies, axis=0)
    std_train_accuracies = np.std(train_accuracies, axis=0)
    mean_val_accuracies = np.mean(val_accuracies, axis=0)
    std_val_accuracies = np.std(val_accuracies, axis=0)

    # Plot loss/accuracy curves with +/- 1 std bands
    plt.figure(figsize=(16, 6))

    # Loss curves
    plt.subplot(1, 2, 1)
    epochs = range(1, len(mean_train_losses) + 1)
    plt.plot(epochs, mean_train_losses, 'b-', label='训练损失')
    plt.fill_between(epochs, mean_train_losses - std_train_losses,
                     mean_train_losses + std_train_losses, alpha=0.2, color='b')
    plt.plot(epochs, mean_val_losses, 'r-', label='验证损失')
    plt.fill_between(epochs, mean_val_losses - std_val_losses,
                     mean_val_losses + std_val_losses, alpha=0.2, color='r')
    plt.xlabel('轮次')
    plt.ylabel('损失')
    plt.title('交叉验证训练和验证损失')
    plt.legend()
    plt.grid(True, linestyle='--', alpha=0.7)

    # Accuracy curves
    plt.subplot(1, 2, 2)
    plt.plot(epochs, mean_train_accuracies, 'g-', label='训练准确率')
    plt.fill_between(epochs, mean_train_accuracies - std_train_accuracies,
                     mean_train_accuracies + std_train_accuracies, alpha=0.2, color='g')
    plt.plot(epochs, mean_val_accuracies, 'y-', label='验证准确率')
    plt.fill_between(epochs, mean_val_accuracies - std_val_accuracies,
                     mean_val_accuracies + std_val_accuracies, alpha=0.2, color='y')
    plt.xlabel('轮次')
    plt.ylabel('准确率 (%)')
    plt.title('交叉验证训练和验证准确率')
    plt.legend()
    plt.grid(True, linestyle='--', alpha=0.7)

    plt.tight_layout()

    # Evaluate the model on the test set
    print("\n在测试集上评估最佳模型性能...")
    model.eval()
    all_preds = []
    all_labels = []
    all_probs = []
    correct = 0
    total = 0

    with torch.no_grad():
        for X_batch, y_batch in tqdm(test_loader, desc="测试集评估"):
            outputs = model(X_batch)
            _, predicted = torch.max(outputs.data, 1)

            total += y_batch.size(0)
            correct += (predicted == y_batch).sum().item()

            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(y_batch.cpu().numpy())

            # Store predicted class probabilities for the ROC/PR curves
            probs = torch.softmax(outputs, dim=1)
            all_probs.extend(probs.cpu().numpy())

    # Compute test accuracy (percent)
    test_accuracy = 100 * correct / total
    print(f"\n测试集准确率: {test_accuracy:.2f}%")

    # Print the confusion matrix
    conf_matrix = confusion_matrix(all_labels, all_preds)
    print("\n混淆矩阵:")
    print(conf_matrix)

    # Print the classification report
    print("\n分类报告:")
    class_report = classification_report(all_labels, all_preds)
    print(class_report)

    # Visualize the confusion matrix
    plt.figure(figsize=(10, 8))
    n_classes = len(np.unique(all_labels))
    class_labels = [f'类别 {i}' for i in range(n_classes)]

    sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues',
                xticklabels=class_labels, yticklabels=class_labels)
    plt.title('测试集混淆矩阵')
    plt.xlabel('预测标签')
    plt.ylabel('真实标签')

    if save_path:
        # NOTE(review): this relies on the learning-curve plot being figure 1
        # and the confusion matrix being figure 2 -- it breaks if any other
        # figures are already open in this process. Confirm.
        # Save the cross-validation learning curves
        plt.figure(1)
        plt.savefig(f"{save_path}_learning_curves.png",
                    dpi=300, bbox_inches='tight')
        # Save the confusion matrix
        plt.figure(2)
        plt.savefig(f"{save_path}_confusion_matrix.png",
                    dpi=300, bbox_inches='tight')

    plt.show()

    # Plot ROC and PR curves (only when there are few classes)
    if n_classes <= 10:
        all_labels = np.array(all_labels)
        all_probs = np.array(all_probs)

        plt.figure(figsize=(16, 6))

        # One-vs-rest ROC curves
        plt.subplot(1, 2, 1)
        for i in range(n_classes):
            if i < all_probs.shape[1]:  # make sure the class column exists
                fpr, tpr, _ = roc_curve(all_labels == i, all_probs[:, i])
                roc_auc = auc(fpr, tpr)
                plt.plot(fpr, tpr, lw=2, label=f'类别 {i} (AUC = {roc_auc:.2f})')

        plt.plot([0, 1], [0, 1], 'k--', lw=2)
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('假阳性率')
        plt.ylabel('真阳性率')
        plt.title('ROC曲线')
        plt.legend(loc="lower right")

        # One-vs-rest precision-recall curves
        plt.subplot(1, 2, 2)
        for i in range(n_classes):
            if i < all_probs.shape[1]:
                precision, recall, _ = precision_recall_curve(
                    all_labels == i, all_probs[:, i])
                avg_precision = average_precision_score(
                    all_labels == i, all_probs[:, i])
                plt.plot(recall, precision, lw=2,
                         label=f'类别 {i} (AP = {avg_precision:.2f})')

        plt.xlabel('召回率')
        plt.ylabel('精确率')
        plt.title('精确率-召回率曲线')
        plt.legend(loc="best")

        plt.tight_layout()

        if save_path:
            plt.savefig(f"{save_path}_pr_roc_curves.png",
                        dpi=300, bbox_inches='tight')

        plt.show()

    return test_accuracy


def train_test_step_split_by_ratio(X, y, test_size=config.TEST_SIZE):
    """
    Split a dataset into train/test parts while keeping the original order.

    Test samples are picked at (approximately) regular intervals across the
    whole sequence rather than from a contiguous tail, so both parts cover
    the full range of the data.

    :param X: list or np.array of features (inputs).
    :param y: list or np.array of labels (outputs), aligned with X.
    :param test_size: fraction of samples to reserve for testing, in (0, 1).
    :return: (X_train, X_test, y_train, y_test), order-preserving split.
    :raises ValueError: if test_size is out of range or the data is too short.
    """
    if not (0 < test_size < 1):
        raise ValueError("test_size 必须是 (0,1) 之间的小数，例如 0.2 代表 20%")

    # Convert to NumPy arrays so fancy indexing works uniformly
    features = np.array(X)
    targets = np.array(y)
    total = len(features)

    num_test = int(total * test_size)
    if num_test == 0:
        raise ValueError("测试集大小太小，请增加数据集长度或调整 test_size")

    # Stride through the data so test samples are spread evenly; the slice
    # caps the count at exactly num_test.
    stride = max(1, total // num_test)
    test_idx = np.arange(0, total, stride)[:num_test]
    train_idx = np.delete(np.arange(total), test_idx)

    return features[train_idx], features[test_idx], targets[train_idx], targets[test_idx]


def process_before_train(X, y, test_size=config.TEST_SIZE):
    """
    Scale features, build sliding-window sequences and wrap them in loaders.

    The chronological order is preserved: ``train_test_split`` is called with
    ``shuffle=False`` so the test set is the trailing portion of the series.
    All tensors are moved to the module-level ``device``.

    Args:
        X: feature array, shape (n_samples, n_features).
        y: label array aligned with X.
        test_size (float, optional): fraction held out for testing.
            Defaults to config.TEST_SIZE.

    Returns:
        tuple: (train_loader, test_loader, train_dataset, test_dataset,
        scaler) where ``scaler`` is the fitted MinMaxScaler.
    """
    # Normalize features to [0, 1] and window them
    feature_scaler = MinMaxScaler()
    windows, targets = create_time_seq(feature_scaler.fit_transform(X), y)

    # Chronological (unshuffled) train/test split
    X_train, X_test, y_train, y_test = train_test_split(
        np.array(windows), np.array(targets),
        test_size=test_size, shuffle=False)

    # Move everything to the shared device as tensors
    train_features = torch.tensor(X_train, dtype=torch.float32).to(device)
    train_targets = torch.tensor(y_train, dtype=torch.long).to(device)
    test_features = torch.tensor(X_test, dtype=torch.float32).to(device)
    test_targets = torch.tensor(y_test, dtype=torch.long).to(device)

    # Wrap in datasets and (unshuffled) loaders
    train_dataset = TensorDataset(train_features, train_targets)
    test_dataset = TensorDataset(test_features, test_targets)
    train_loader = DataLoader(train_dataset, batch_size=config.BATCH_SIZE)
    test_loader = DataLoader(test_dataset, batch_size=config.BATCH_SIZE)

    return train_loader, test_loader, train_dataset, test_dataset, feature_scaler


def train_model_with_cv(model, X, y, optimizer, criterion, save_name, n_splits=5, time_steps=config.TIME_STEPS):
    """
    Train ``model`` with K-fold cross-validation and save the best checkpoint.

    The features are min-max scaled, turned into sliding windows via
    ``create_time_seq``, and split into ``n_splits`` contiguous folds
    (``KFold`` without shuffling, preserving temporal order). The model's
    parameters are re-initialized at the start of every fold; the checkpoint
    with the highest validation accuracy over all folds and epochs is saved
    to ``./model/{save_name}_best.pth`` and the final state to
    ``./model/{save_name}_last.pth`` (whole-module ``torch.save``).

    NOTE(review): the scaler is fitted on all of X before the CV split, so
    validation folds influence the scaling statistics (mild leakage).
    NOTE(review): ``optimizer`` is bound to the model's parameters but its
    internal state (e.g. Adam moments) is NOT reset between folds even though
    the parameters are -- confirm this is intended.

    Args:
        model: model instance to train (updated in place).
        X: feature array, shape (n_samples, n_features).
        y: label array, shape (n_samples,).
        optimizer: optimizer bound to ``model``'s parameters.
        criterion: loss function taking (logits, class indices).
        save_name: base name for the checkpoint files under ./model/.
        n_splits: number of cross-validation folds.
        time_steps: sliding-window length for sequence creation.

    Returns:
        tuple of four lists shaped [n_splits][config.EPOCHS]:
        (train_losses, val_losses, train_accuracies, val_accuracies)
    """
    from sklearn.model_selection import KFold
    import os

    # Ensure the checkpoint directory exists before any torch.save
    os.makedirs('./model', exist_ok=True)

    # Scale the features (MinMaxScaler is imported at module level)
    scaler = MinMaxScaler()
    X_scaled = scaler.fit_transform(X)

    # Build sliding-window sequences and labels
    sequences, labels = create_time_seq(X_scaled, y, time_steps)

    # K-fold splitter (no shuffling: folds keep temporal order)
    kf = KFold(n_splits=n_splits)

    # Metric histories for every fold
    train_losses = []
    val_losses = []
    train_accuracies = []
    val_accuracies = []

    # Best checkpoint bookkeeping across all folds/epochs
    best_val_acc = 0.0
    best_fold = 0

    # Overall training start time (total, not per fold)
    start_time = time.time()

    # K-fold cross-validation loop
    for fold, (train_idx, val_idx) in enumerate(kf.split(sequences)):
        print(f"\n开始训练第 {fold+1}/{n_splits} 折...")

        # Re-initialize every layer that supports it so each fold starts fresh
        model.apply(lambda m: m.reset_parameters()
                    if hasattr(m, 'reset_parameters') else None)

        # Training and validation subsets for this fold
        X_train, X_val = sequences[train_idx], sequences[val_idx]
        y_train, y_val = labels[train_idx], labels[val_idx]

        # Convert to tensors on the target device
        X_train_tensor = torch.tensor(X_train, dtype=torch.float32).to(device)
        y_train_tensor = torch.tensor(y_train, dtype=torch.long).to(device)
        X_val_tensor = torch.tensor(X_val, dtype=torch.float32).to(device)
        y_val_tensor = torch.tensor(y_val, dtype=torch.long).to(device)

        # Data loaders (training shuffled, validation not)
        train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
        val_dataset = TensorDataset(X_val_tensor, y_val_tensor)
        train_loader = DataLoader(
            train_dataset, batch_size=config.BATCH_SIZE, shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=config.BATCH_SIZE)

        # Per-epoch metrics for this fold
        fold_train_losses = []
        fold_val_losses = []
        fold_train_accuracies = []
        fold_val_accuracies = []

        # Train for the configured number of epochs
        for epoch in range(config.EPOCHS):
            # --- training phase ---
            model.train()
            running_loss = 0.0
            correct = 0
            total = 0
            train_pbar = tqdm(
                train_loader, desc=f'Fold {fold+1}/{n_splits}, Epoch {epoch+1}/{config.EPOCHS} [Train]')

            for i, (X_batch, y_batch) in enumerate(train_pbar):
                # Forward pass
                outputs = model(X_batch)
                loss = criterion(outputs, y_batch)

                # Backward pass and parameter update
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # Running accuracy
                _, predicted = torch.max(outputs.data, 1)
                total += y_batch.size(0)
                correct += (predicted == y_batch).sum().item()

                # Running loss
                running_loss += loss.item()
                train_pbar.set_postfix(
                    {'loss': running_loss/(i+1), 'acc': 100*correct/total})

            # Mean training loss/accuracy for this epoch
            epoch_train_loss = running_loss / len(train_loader)
            epoch_train_acc = 100 * correct / total
            fold_train_losses.append(epoch_train_loss)
            fold_train_accuracies.append(epoch_train_acc)

            # --- validation phase ---
            model.eval()
            val_loss = 0.0
            correct = 0
            total = 0

            with torch.no_grad():
                val_pbar = tqdm(
                    val_loader, desc=f'Fold {fold+1}/{n_splits}, Epoch {epoch+1}/{config.EPOCHS} [Val]')
                for i, (X_batch, y_batch) in enumerate(val_pbar):
                    outputs = model(X_batch)
                    loss = criterion(outputs, y_batch)
                    val_loss += loss.item()

                    _, predicted = torch.max(outputs.data, 1)
                    total += y_batch.size(0)
                    correct += (predicted == y_batch).sum().item()

                    val_pbar.set_postfix(
                        {'loss': val_loss/(i+1), 'acc': 100*correct/total})

            # Mean validation loss/accuracy for this epoch
            epoch_val_loss = val_loss / len(val_loader)
            epoch_val_acc = 100 * correct / total
            fold_val_losses.append(epoch_val_loss)
            fold_val_accuracies.append(epoch_val_acc)

            # Report this epoch
            print(f'Fold {fold+1}/{n_splits}, Epoch {epoch+1}/{config.EPOCHS}: '
                  f'Train Loss: {epoch_train_loss:.4f}, Train Acc: {epoch_train_acc:.2f}%, '
                  f'Val Loss: {epoch_val_loss:.4f}, Val Acc: {epoch_val_acc:.2f}%')

            # Save the best checkpoint seen so far (across all folds)
            if epoch_val_acc > best_val_acc:
                best_val_acc = epoch_val_acc
                best_fold = fold + 1
                # Whole-module save (not just the state_dict)
                torch.save(model, f'./model/{save_name}_best.pth')
                print(
                    f'✓ 在第 {fold+1} 折，第 {epoch+1} 轮更新最佳模型，验证准确率: {best_val_acc:.2f}%')

        # Fold summary: average loss/accuracy over its epochs
        avg_train_loss = np.mean(fold_train_losses)
        avg_train_acc = np.mean(fold_train_accuracies)
        avg_val_loss = np.mean(fold_val_losses)
        avg_val_acc = np.mean(fold_val_accuracies)

        print(f'\n第 {fold+1} 折平均性能: '
              f'Train Loss: {avg_train_loss:.4f}, Train Acc: {avg_train_acc:.2f}%, '
              f'Val Loss: {avg_val_loss:.4f}, Val Acc: {avg_val_acc:.2f}%')

        # Append this fold's history to the overall records
        train_losses.append(fold_train_losses)
        val_losses.append(fold_val_losses)
        train_accuracies.append(fold_train_accuracies)
        val_accuracies.append(fold_val_accuracies)

    # Keep the final (last-fold) weights as well
    torch.save(model, f'./model/{save_name}_last.pth')

    # Total training time
    training_time = time.time() - start_time
    print(f'\n交叉验证训练完成，耗时 {training_time:.2f} 秒')
    print(f'最佳模型来自第 {best_fold} 折，验证准确率: {best_val_acc:.2f}%')
    # Bug fix: the best checkpoint is written with the "_best" suffix above;
    # the old message pointed at a file that is never created.
    print(f'最佳模型已保存到 ./model/{save_name}_best.pth')

    return train_losses, val_losses, train_accuracies, val_accuracies


def train_model(model, train_loader, test_loader, optimizer, criterion, save_name):
    """
    Train ``model`` and checkpoint the weights with the best test accuracy.

    After each training epoch the model is evaluated on ``test_loader``;
    whenever the test accuracy improves, the whole module is saved to
    ``./model/{save_name}.pth`` via ``torch.save``.

    Args:
        model (torch.nn.Module): model to train (updated in place).
        train_loader (DataLoader): training batches (expected to already be
            on the model's device -- see process_before_train).
        test_loader (DataLoader): test batches for per-epoch evaluation.
        optimizer: optimizer bound to ``model``'s parameters.
        criterion: loss function taking (logits, class indices).
        save_name (str): base name of the checkpoint file under ./model/.

    Returns:
        tuple: (train_losses, test_losses, train_accuracies, test_accuracies),
        each a list with one entry per epoch; accuracies are percentages.
    """
    import os

    # Robustness fix: ensure the checkpoint directory exists before
    # torch.save is called (train_model_with_cv already does this; without it
    # the first save fails when ./model is missing).
    os.makedirs('./model', exist_ok=True)

    # Metric histories
    train_losses = []
    test_losses = []
    train_accuracies = []
    test_accuracies = []
    start_time = time.time()
    best_test_acc = 0.0  # best test accuracy seen so far
    for epoch in range(config.EPOCHS):
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0

        # Progress bar over the training batches
        train_pbar = tqdm(
            train_loader, desc=f'Epoch {epoch+1}/{config.EPOCHS} [Train]')
        for i, (X_batch, y_batch) in enumerate(train_pbar):
            # Forward pass
            outputs = model(X_batch)
            loss = criterion(outputs, y_batch)

            # Backward pass and parameter update
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Running accuracy
            _, predicted = torch.max(outputs.data, 1)
            total += y_batch.size(0)
            correct += (predicted == y_batch).sum().item()

            # Running loss
            running_loss += loss.item()
            train_pbar.set_postfix(
                {'loss': running_loss/(i+1), 'acc': 100*correct/total})

        # Average loss/accuracy over the whole training set
        epoch_loss = running_loss / len(train_loader)
        epoch_acc = 100 * correct / total
        train_losses.append(epoch_loss)
        train_accuracies.append(epoch_acc)

        # Evaluate on the test set
        model.eval()
        test_loss = 0.0
        correct = 0
        total = 0

        with torch.no_grad():
            test_pbar = tqdm(
                test_loader, desc=f'Epoch {epoch+1}/{config.EPOCHS} [Test]')
            for i, (X_batch, y_batch) in enumerate(test_pbar):
                outputs = model(X_batch)
                loss = criterion(outputs, y_batch)
                test_loss += loss.item()

                _, predicted = torch.max(outputs.data, 1)
                total += y_batch.size(0)
                correct += (predicted == y_batch).sum().item()

                test_pbar.set_postfix(
                    {'loss': test_loss/(i+1), 'acc': 100*correct/total})

        # Average loss/accuracy over the whole test set
        test_loss = test_loss / len(test_loader)
        test_acc = 100 * correct / total
        test_losses.append(test_loss)
        test_accuracies.append(test_acc)

        # Checkpoint on improvement (whole-module save)
        if test_acc > best_test_acc:
            best_test_acc = test_acc
            torch.save(model, f'./model/{save_name}.pth')
            print(f'Best model saved with accuracy: {best_test_acc:.2f}%')

        print(f'Epoch {epoch+1}/{config.EPOCHS}, '
              f'Train Loss: {epoch_loss:.4f}, Train Acc: {epoch_acc:.2f}%, '
              f'Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.2f}%')

    training_time = time.time() - start_time
    print(f'Training completed in {training_time:.2f} seconds')

    return train_losses, test_losses, train_accuracies, test_accuracies


def unit_min_to_sec(data, col, min_time='2022-01-01 00:00:00'):
    """
    Spread records that share the same minute across distinct seconds.

    Rows are sorted by ``col`` and grouped by minute; within each minute the
    records are assigned seconds spread evenly over 0..59 (a lone record gets
    second 0). Each cell is written back as a '%Y-%m-%d %H:%M:%S' string.
    Finally the row with index label 0 is forced to ``min_time``.

    NOTE(review): ``data.at[0, col] = min_time`` assumes the frame has an
    index label 0 and overwrites that row unconditionally -- confirm this is
    the intended anchor row.

    Args:
        data (pd.DataFrame): frame containing a timestamp column.
        col (str): name of the timestamp column to adjust.
        min_time (str, optional): value forced into the label-0 row.

    Returns:
        pd.DataFrame: the sorted frame with adjusted timestamps.
    """
    # Work on a frame sorted by the timestamp column
    data = data.sort_values(col)

    # Coerce the column to datetimes if it is not already (bad values -> NaT)
    if not pd.api.types.is_datetime64_any_dtype(data[col]):
        data[col] = pd.to_datetime(data[col], errors='coerce')

    # Only rows with a valid timestamp take part in the grouping
    valid = data.dropna(subset=[col])

    # Group rows by the minute they fall into and assign seconds per group
    for minute_start, minute_group in valid.groupby(valid[col].dt.floor('min')):
        count = len(minute_group)
        # Spread seconds evenly over 0..59; a single record stays at :00
        offsets = np.linspace(0, 59, count) if count > 1 else [0]

        # Write each record back as a formatted string at its new second
        for pos, row_label in enumerate(minute_group.index):
            stamped = minute_start + pd.Timedelta(seconds=int(offsets[pos]))
            data.at[row_label, col] = stamped.strftime('%Y-%m-%d %H:%M:%S')

    # Anchor the label-0 row to the configured start time
    data.at[0, col] = min_time
    return data
