import numpy as np
import scipy.io as sio
import matplotlib
matplotlib.use("Qt5Agg")  # 指定使用 PyQt5
import matplotlib.pyplot as plt
import cv2
from sklearn.preprocessing import MinMaxScaler
from Preprocessing import My_dataset
from torch.utils import data
import glob
import utils
from torchvision import datasets, transforms
import time
import torch
from Neural_Networks import (P2D_GE_Net, CNNNet, DN4Net, ProtoNetClassifier, GNNImageClassifier,
                             DSResNet_GADF, P2D_DSResNet_CWT, P2D_DSResNet_1, P2D_DSResNet_V2)
from torch import nn
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from collections import Counter
import pywt
from torch.optim.lr_scheduler import ExponentialLR
from scipy.signal import hilbert
from scipy.fft import fft
from scipy.ndimage import grey_dilation, grey_erosion, grey_opening, grey_closing
import scipy.io
from joblib import Parallel, delayed
from PIL import Image
import os
import psutil
from torch.utils.data import DataLoader, TensorDataset, Subset
from torch.utils.data import Dataset

'''############################# 基础信息 ##############################'''
'''matplotlib解决中文字体问题'''
plt.rcParams['font.sans-serif'] = ['SimHei']  # use SimHei so Chinese labels in plots render correctly
plt.rcParams['axes.unicode_minus'] = False   # keep the minus sign readable when a CJK font is active

'''值域[-1,1]映射到 [0,255] 并转成 uint8'''
def normalize_to_uint8(M):
    """Map an array of values in [-1, 1] to [0, 255] and cast to uint8.

    Values outside [-1, 1] are clipped to the valid byte range.
    """
    scaled = np.clip((M + 1.0) * 127.5, 0, 255)
    return scaled.astype(np.uint8)

def show_fake_image(tensor_img):
    """Display a single generated (GADF-like) image tensor in grayscale.

    Blocks until the plot window is closed.
    """
    arr = tensor_img.detach().cpu().squeeze().numpy()
    plt.imshow(arr, cmap='gray')
    plt.axis('off')
    plt.title("Generated GADF-like Image")
    plt.show()


ALL_FE_data = []   # accumulator for feature data (not written to in this chunk)
ALL_RPM = []       # accumulator for RPM values (not written to in this chunk)
zheng = [56, 57, 58, 59, 48, 49, 50, 51]        # record/file indices for signal handling — TODO(review): confirm what these map to
'''############################# 信号处理的方法 ##############################'''
def GADF(signal, cataglog, count):
    """Build a Gramian Angular Difference Field image from a 1-D signal.

    ``cataglog`` and ``count`` are kept for interface compatibility; they were
    only used by a save-to-disk path that is currently disabled.

    Returns:
        A grayscale PIL image of shape (len(signal), len(signal)).
    """
    # Pairwise differences, then sine — assumes `signal` already holds the
    # angular encoding expected by GADF (TODO confirm with the caller).
    pairwise_diff = signal[:, None] - signal[None, :]
    field = np.sin(pairwise_diff)
    return Image.fromarray(normalize_to_uint8(field))


def generate_cwt(signal, cataglog, count, scales=np.arange(1, 65)):
    """Compute a CWT scalogram (complex Morlet wavelet) of ``signal``.

    ``cataglog`` and ``count`` are only needed by the disabled save-to-disk
    path and are otherwise unused.

    Returns:
        A grayscale ('L' mode) PIL image of the magnitude scalogram.
    """
    # Complex Morlet wavelet; keep only the coefficient magnitudes.
    coeffs, _ = pywt.cwt(signal, scales, 'cmor1.5-1.0')
    magnitude = np.abs(coeffs)

    # Min-max normalise to [0, 1], then rescale into the uint8 range.
    lo = np.min(magnitude)
    hi = np.max(magnitude)
    normalised = (magnitude - lo) / (hi - lo)
    as_bytes = (normalised * 255).astype(np.uint8)

    return Image.fromarray(as_bytes, mode='L')


def function_EMTF(y, g_size):
    """Single-scale morphological decomposition with weighting.

    Uses an all-ones structuring element of length ``g_size`` and returns the
    weighted detail waveform ``2 * WT * w``, where WT is the residual against
    the dilation/erosion average and w the residual against the combined
    close-dilate-erode / open-erode-dilate average.
    """
    se = np.ones(g_size)

    dilated = grey_dilation(y, structure=se)
    eroded = grey_erosion(y, structure=se)
    opened = grey_opening(y, structure=se)
    closed = grey_closing(y, structure=se)

    # Detail waveform: signal minus the dilation/erosion midpoint.
    detail = y - (dilated + eroded) / 2

    # Combined opening/closing operators.
    cde = grey_erosion(grey_dilation(closed, structure=se), structure=se)
    oed = grey_dilation(grey_erosion(opened, structure=se), structure=se)

    weight = y - (cde + oed) / 2
    return 2 * detail * weight


def EMTF(signal, N, catalog, count):
    """Generate an EMTF time-frequency image from the first ``N`` samples.

    Runs the multi-scale morphological decomposition in parallel, takes the
    Hilbert-envelope spectrum per scale, keeps the first 87 frequency bins
    and returns the min-max-normalised result as a grayscale PIL image.
    """
    start_time = time.perf_counter()

    # Basic parameters; fs is the sampling rate of the source recordings.
    y = signal[:N]
    fs = 12000
    t = np.arange(N) / fs
    L = len(t)
    NFFT = 2 ** int(np.ceil(np.log2(L)))
    f = fs / 2 * np.linspace(0, 1, NFFT // 2 + 1)

    # Multi-scale decomposition: structuring sizes 2..300, one job per scale.
    scale_range = range(1, 300)
    decompositions = Parallel(n_jobs=-1)(
        delayed(function_EMTF)(y, s + 1) for s in scale_range
    )
    decompositions = np.array(decompositions)

    # Hilbert-envelope Fourier spectrum, scale by scale.
    baoluoatL = np.zeros((len(scale_range), len(f)))
    for i in range(len(scale_range)):
        envelope = np.abs(hilbert(decompositions[i, :]))
        spectrum = fft(envelope, n=NFFT) / L
        baoluoatL[i, :] = 2 * np.abs(spectrum[:NFFT // 2 + 1])

    # Zero out the DC component.
    baoluoatL[:, 0] = 0

    # Keep only the first 87 frequency bins, then min-max normalise.
    A = baoluoatL[:, :87]
    C_normalized = (A - np.min(A)) / (np.max(A) - np.min(A))

    # Convert to an 8-bit grayscale image.
    image_gray = Image.fromarray((C_normalized * 255).astype(np.uint8), mode='L')

    elapsed_time = time.perf_counter() - start_time
    m, s = divmod(elapsed_time, 60)
    print(f"运行第{count}次, 运行时间 {int(m)} min {s:.2f} s, "
          f"save_img: {count:03d}_{catalog}_EMTF.png")

    return image_gray


'''############################# 数据加载 ##############################'''
def Data_loading():
    """Load the pre-generated CWT/EMTF/GADF images for all four classes.

    Returns:
        data1: dict mapping '<style>_<class>' (e.g. 'GADF_0') to a list of
            transformed image tensors.
        data_paths: glob result for the dataset root directory.
    """
    data_paths = glob.glob(r"D:\pycharm_project\Bearing_inspection\Be_legend\data_handle\four_data")

    # NOTE(review): ColorJitter with saturation/hue normally expects 3-channel
    # input, but the saved PNGs are grayscale — confirm this is intended.
    transform = transforms.Compose([
        transforms.Resize((128, 128)),  # standard input size
        transforms.RandomCrop(128),
        transforms.RandomVerticalFlip(),
        transforms.RandomRotation(20),  # rotation given in degrees
        transforms.ColorJitter(brightness=0.1, contrast=0.1,
                               saturation=0.1, hue=0.1),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485], std=[0.229])
    ])

    # Raw string + os.path.join: the original f-string relied on invalid
    # "\f"-style escape sequences inside a non-raw Windows path literal.
    root = r"D:\pycharm_project\Bearing_inspection\Be_legend\data_handle\four_data"

    data1 = {}
    for i in range(4):
        for st in ['CWT', 'EMTF', 'GADF']:
            print(f'create:{st}_{str(i)}')
            pic_list = []
            for pic in glob.glob(os.path.join(root, str(i), st, "*.png")):
                # Context manager closes the file handle promptly instead of
                # leaking it until garbage collection.
                with Image.open(pic) as img:
                    pic_list.append(transform(img))
            data1[f'{st}_{str(i)}'] = pic_list

    return data1, data_paths


class MyTensorDataset(Dataset):
    """Minimal Dataset over parallel lists of samples and integer labels."""

    def __init__(self, samples, labels):
        self.samples = samples
        self.labels = labels

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        sample, label = self.samples[idx], self.labels[idx]
        return sample, label


'''############################# 数据处理 ##############################'''
def _fold_slice(items, data_num):
    """Return the ``data_num``-th quarter of ``items`` (4-fold split)."""
    fold_len = len(items) // 4
    return items[fold_len * data_num: fold_len * (data_num + 1)]


def _make_loaders(dataset, train_indices, test_indices, batch_size):
    """Build (train, test) DataLoaders over a fixed index split, unshuffled."""
    train_loader = DataLoader(Subset(dataset, train_indices),
                              batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(Subset(dataset, test_indices),
                             batch_size=batch_size, shuffle=False)
    return train_loader, test_loader


def aLL(data1, data_num, is_cwt=1):
    """Assemble train/test DataLoaders for one fold of one image style.

    Args:
        data1: dict from Data_loading(), keys like 'GADF_0'.
        data_num: fold index in [0, 3]; selects one quarter of each class.
        is_cwt: 0 -> CWT loaders, 1 -> GADF loaders, 2 -> EMTF loaders,
            anything else -> empty lists.

    Returns:
        (train_dl, test_dl). The 80/20 split uses a fixed seed and the same
        index permutation for every style, so loaders stay batch-aligned.
        Loaders are unshuffled for the same reason.
    """
    samples = {'GADF': [], 'CWT': [], 'EMTF': []}
    labels = {'GADF': [], 'CWT': [], 'EMTF': []}

    for label_idx in range(4):  # four fault classes
        for style in samples:
            items = _fold_slice(data1.get(f"{style}_{label_idx}", []), data_num)
            samples[style].extend(items)
            labels[style].extend([label_idx] * len(items))

    datasets = {style: MyTensorDataset(samples[style], labels[style])
                for style in samples}

    # Fixed seed => reproducible, style-independent 80/20 split.
    # NOTE(review): indices are drawn from the GADF dataset size; styles with
    # fewer images would index out of range — confirm all styles match.
    dataset_size = len(datasets['GADF'])
    train_size = int(0.8 * dataset_size)
    torch.manual_seed(42)
    indices = torch.randperm(dataset_size).tolist()
    train_indices = indices[:train_size]
    test_indices = indices[train_size:]

    BATCH_SIZE = 32

    style_for_flag = {0: 'CWT', 1: 'GADF', 2: 'EMTF'}.get(is_cwt)
    if style_for_flag is None:
        return [], []
    return _make_loaders(datasets[style_for_flag],
                         train_indices, test_indices, BATCH_SIZE)


'''############################# 添加或删除模型 ##############################'''
def model_de():
    """Instantiate every candidate model with its optimizer and LR scheduler.

    All models share the same training hyper-parameters (Adam, lr=0.001,
    weight_decay=1e-5, ExponentialLR gamma=0.9), so the nine copy-pasted
    blocks of the original are replaced by one table-driven loop.

    Returns:
        modell: dict name -> {"model", "opt", "scheduler"}.
        loss_fn: shared CrossEntropyLoss instance.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    loss_fn = nn.CrossEntropyLoss()

    # name -> zero-argument constructor
    constructors = {
        'CNNNet': lambda: CNNNet(),
        'ProtoNetClassifier': lambda: ProtoNetClassifier(in_channels=1, hidden_size=64, num_classes=4),
        'DN4Net': lambda: DN4Net(in_channels=1, num_classes=4),
        'GNNImageClassifier': lambda: GNNImageClassifier(in_channels=1, num_classes=4),
        'P2D_GE_Net': lambda: P2D_GE_Net(num_classes=4),
        'DSResNet_GADF': lambda: DSResNet_GADF(num_classes=4),
        'P2D_DSResNet_CWT': lambda: P2D_DSResNet_CWT(num_classes=4),
        'P2D_DSResNet_1': lambda: P2D_DSResNet_1(num_classes=4),
        'P2D_DSResNet_V2': lambda: P2D_DSResNet_V2(num_classes=4),
    }

    modell = {}
    for name, build in constructors.items():
        model = build().to(device)
        opt = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)
        scheduler = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9)
        modell[name] = {
            "model": model,
            "opt": opt,
            "scheduler": scheduler,
        }

    return modell, loss_fn

'''############################# 时间获取 ##############################'''
def get_time(time):
    """Split a duration in seconds into ``(minutes, seconds)``.

    Equivalent to ``divmod(time, 60)``. The original returned an int 0 for
    durations <= 60 s but a float quotient otherwise; divmod makes the
    return type consistent and normalises exactly 60 s to (1, 0).

    NOTE: the parameter name shadows the ``time`` module inside this
    function; kept for interface compatibility.
    """
    return divmod(time, 60)


'''############################# 训练一次函数 ##############################'''
def fit(epochs, epoch, model, traindataloader, test_dataloader, loss_fn, optim, scheduler, device):
    """Run one training epoch and one evaluation pass.

    Args:
        epochs / epoch: total epoch count and current index (for logging).
        model, loss_fn, optim, scheduler, device: training components.
        traindataloader / test_dataloader: batched (x, y) loaders.

    Returns:
        (train_loss, train_acc, test_loss, test_acc). Losses are true
        per-sample means: batch losses are weighted by batch size, matching
        p2D_train. (The original divided a sum of batch-MEAN losses by the
        dataset size, under-reporting the loss by roughly the batch size.)
    """
    start = time.perf_counter()
    correct = 0
    total = 0
    running_loss = 0.0

    model.train()  # training mode
    for x, y in traindataloader:
        y = y.long()
        x, y = x.to(device), y.to(device)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optim.zero_grad()
        loss.backward()
        optim.step()

        with torch.no_grad():
            pred_cls = torch.argmax(y_pred, dim=1)  # predicted class per row
            correct += (pred_cls == y).sum().item()
            total += y.size(0)
            # weight by batch size so uneven final batches don't skew the mean
            running_loss += loss.item() * y.size(0)

    scheduler.step()
    epoch_acc = correct / total
    epoch_loss = running_loss / total

    test_correct = 0
    test_total = 0
    test_running_loss = 0.0
    model.eval()  # evaluation mode
    with torch.no_grad():
        for x, y in test_dataloader:
            y = y.long()
            x, y = x.to(device), y.to(device)
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            pred_cls = torch.argmax(y_pred, dim=1)
            test_correct += (pred_cls == y).sum().item()
            test_total += y.size(0)
            test_running_loss += loss.item() * y.size(0)

    test_epoch_acc = test_correct / test_total
    test_epoch_loss = test_running_loss / test_total

    # Naive remaining-time estimate: this epoch's duration times epochs left.
    elapsed = time.perf_counter() - start
    rest_time = elapsed * (epochs - epoch)

    print(f"Epoch [{epoch + 1}/{epochs}],   train_Loss: {epoch_loss:.4f},    "
          f"train_accuracy: {epoch_acc:.4f},    test_Loss: {test_epoch_loss:.4f},    "
          f"test_accuracy: {test_epoch_acc:.4f},    "
          )

    # get_time always returns a 2-tuple, so the original try/except TypeError
    # was dead code and has been removed.
    mins, secs = get_time(rest_time)
    print(f"rest_time: {int(mins)} m, {int(secs)} s")
    return epoch_loss, epoch_acc, test_epoch_loss, test_epoch_acc


'''############################# 训练epoch个函数 ##############################'''
def train1(name: str, loss_fn, train_dl, test_dl, amodel: dict, epochs: int):
    """Train one single-input model for ``epochs`` epochs and save its weights.

    Args:
        name: key into ``amodel`` selecting the model/optimizer/scheduler.
        loss_fn: classification loss.
        train_dl / test_dl: DataLoaders for one image style.
        amodel: dict produced by model_de().
        epochs: number of epochs to run.

    Returns:
        The final epoch's test accuracy.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    start_time = time.perf_counter()

    train_loss, train_acc = [], []
    test_loss, test_acc = [], []
    acc_shu, acc_shu_list = [], []

    entry = amodel[name]
    model = entry["model"]
    opt = entry["opt"]
    scheduler = entry["scheduler"]

    for epoch in range(epochs):
        metrics = fit(epochs, epoch, model, train_dl, test_dl,
                      loss_fn, opt, scheduler, device)
        epoch_loss, epoch_acc, test_epoch_loss, test_epoch_acc = metrics
        train_loss.append(epoch_loss)
        train_acc.append(epoch_acc)
        test_loss.append(test_epoch_loss)
        test_acc.append(test_epoch_acc)
        # Sample the test accuracy every tenth epoch for later inspection.
        if epoch % 10 == 0:
            acc_shu.append(test_epoch_acc)
            acc_shu_list.append(epoch)

    elapsed = time.perf_counter() - start_time
    mins, secs = get_time(elapsed)
    print(f"all_time: {int(mins)} minutes, {int(secs)} seconds")

    # Persist the trained weights.
    PATH = f'0{name}model.pth'
    torch.save(model.state_dict(), PATH)
    return test_acc[-1]


'''############################# 训练P2D_GE_Net模型 ##############################'''
def p2D_train(name: str, loss_fn, GADF_train_dl, GADF_test_dl, CWT_train_dl, CWT_test_dl, amodel: dict, epochs: int):
    """Train a two-branch model that consumes a GADF image and a CWT/EMTF
    image per sample.

    The two loader pairs must be built from the same (unshuffled) index
    split so their batches stay label-aligned; this is asserted below.

    Args:
        name: key into ``amodel`` selecting the model/optimizer/scheduler.
        loss_fn: classification loss (CrossEntropyLoss).
        GADF_train_dl / GADF_test_dl: loaders feeding the first branch.
        CWT_train_dl / CWT_test_dl: loaders feeding the second branch.
        amodel: dict produced by model_de().
        epochs: number of training epochs.

    Returns:
        The last epoch's test accuracy.
    """
    # GPU setup
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Training bookkeeping
    start_time = time.perf_counter()
    acc_shu = []
    acc_shu_list = []
    train_loss = []
    train_acc = []
    test_loss = []
    test_acc = []

    name_model = amodel[name]
    model = name_model["model"].to(device)  # move the model to the device
    optim = name_model["opt"]
    scheduler = name_model["scheduler"]

    # Both branch loaders must yield the same number of batches.
    assert len(GADF_train_dl) == len(CWT_train_dl), "训练数据加载器长度不一致"
    assert len(GADF_test_dl) == len(CWT_test_dl), "测试数据加载器长度不一致"

    for epoch in range(epochs):
        start_time_1 = time.perf_counter()
        corret = 0
        total = 0
        running_loss = 0

        # ======= training phase =======
        model.train()
        for ((GADF_x, GADF_labels), (CWT_x, CWT_labels)) in zip(GADF_train_dl, CWT_train_dl):
            # Loaders are unshuffled, so labels must match batch-for-batch.
            assert torch.equal(GADF_labels, CWT_labels), "标签不一致，说明顺序乱了"
            y = GADF_labels.long()
            # Move every tensor to the device
            GADF_x, CWT_x, y = GADF_x.to(device), CWT_x.to(device), y.to(device)

            # Forward pass (two inputs, one per branch)
            y_pred = model(GADF_x, CWT_x)
            loss = loss_fn(y_pred, y)

            # Backward pass
            optim.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # gradient clipping
            optim.step()

            # Accumulate metrics
            with torch.no_grad():
                y_pred = torch.argmax(y_pred, dim=1)
                corret += (y_pred == y).sum().item()
                total += y.size(0)
                running_loss += loss.item() * y.size(0)  # weight loss by batch size

        # Learning-rate update
        scheduler.step()

        # Per-epoch metrics
        epoch_acc = corret / total
        epoch_loss = running_loss / total  # total loss / total samples

        # ======= evaluation phase =======
        test_corret = 0
        test_total = 0
        test_running_loss = 0
        model.eval()
        with torch.no_grad():
            for ((GADF_x, GADF_labels), (CWT_x, _)) in zip(GADF_test_dl, CWT_test_dl):
                y = GADF_labels.long()
                GADF_x, CWT_x, y = GADF_x.to(device), CWT_x.to(device), y.to(device)
                y_pred = model(GADF_x, CWT_x)
                loss = loss_fn(y_pred, y)
                y_pred = torch.argmax(y_pred, dim=1)
                test_corret += (y_pred == y).sum().item()
                test_total += y.size(0)
                test_running_loss += loss.item() * y.size(0)  # weight loss by batch size

        test_epoch_acc = test_corret / test_total
        test_epoch_loss = test_running_loss / test_total

        # Progress log. NOTE(review): rest_time is computed but never printed.
        time_end_1 = time.perf_counter() - start_time_1
        rest_time = time_end_1 * (epochs - epoch - 1)

        print(f"Epoch [{epoch + 1}/{epochs}], train_Loss: {epoch_loss:.4f}, "
              f"train_accuracy: {epoch_acc:.4f}, test_Loss: {test_epoch_loss:.4f}, "
              f"test_accuracy: {test_epoch_acc:.4f}")

        # Record metrics
        train_loss.append(epoch_loss)
        train_acc.append(epoch_acc)
        test_loss.append(test_epoch_loss)
        test_acc.append(test_epoch_acc)

        # Checkpoint every 10 epochs (epoch 0, 10, 20, ...)
        if epoch % 10 == 0:
            acc_shu.append(test_epoch_acc)
            acc_shu_list.append(epoch)
            # Save a resumable checkpoint
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optim.state_dict(),
                'loss': test_epoch_loss,
            }, f'checkpoint_epoch_{epoch}.pth')

    # Training finished
    time_end = time.perf_counter() - start_time
    min, sec = get_time(time_end)
    print(f"总训练时间: {int(min)} 分 {int(sec)} 秒")

    # Save the final weights
    torch.save(model.state_dict(), f'0{name}model.pth')
    return test_acc[-1]


'''############################# 得到正确率表格 ##############################'''
def data_colum(ddd: dict):
    """Render a matplotlib table of accuracies: one row per model, one column
    per dataset A-D.

    Each value in ``ddd`` must contain at least four entries; only the first
    four are displayed.
    """
    headers = ['Model', 'A', 'B', 'C', 'D']

    # One row per model: name followed by the four dataset results.
    rows = [[model_name] + [scores[i] for i in range(4)]
            for model_name, scores in ddd.items()]

    # Figure with hidden axes so only the table is visible.
    fig, ax = plt.subplots()
    ax.axis('tight')
    ax.axis('off')

    table = ax.table(
        cellText=rows,
        colLabels=headers,
        loc='center',
        cellLoc='center',      # center cell contents
        colLoc='center'        # center the header row
    )

    # Cosmetics: taller cells and a fixed font size.
    table.scale(1, 2)
    table.auto_set_font_size(False)
    table.set_fontsize(12)

    plt.title("Model Accuracy on Different Datasets", fontsize=14)
    plt.tight_layout()


'''############################# 得到树状图 ##############################'''
def Correctness_Dendrogram(plpl: dict):
    """Grouped bar chart (with error bars) of per-class accuracy per model.

    Expected format:
        {
            "Model1": [[0.85, 0.08], [0.84, 0.07], [0.85, 0.03], [0.70, 0.04]],
            "Model2": [[...], [...], [...], [...]]
        }
        where each inner pair is [mean, standard deviation].
    """
    labels = ['A', 'B', 'C', 'D']
    fig, ax = plt.subplots(figsize=(8, 6))
    bar_width = 0.20
    positions = np.arange(len(labels))
    palette = ['#898989', '#989898', '#b7b7b7', '#c3c3c3']
    n_models = len(plpl)

    for idx, (model_name, stats) in enumerate(plpl.items()):
        # Separate the means from their error-bar magnitudes.
        means = [pair[0] for pair in stats]
        errors = [pair[1] for pair in stats]

        # Center the group of bars around each tick position.
        shift = (idx - (n_models - 1) / 2) * bar_width
        ax.bar(
            positions + shift, means, bar_width,
            yerr=errors, capsize=5,  # error bars
            label=model_name, color=palette[idx % len(palette)],
            alpha=0.9
        )

    ax.set_ylabel('准确率')
    ax.set_xlabel('类别')
    ax.set_title('不同模型在各类别下的准确率比较')
    ax.set_xticks(positions)
    ax.set_xticklabels(labels)
    ax.legend()
    plt.tight_layout()


'''############################# 一个模型多次进行epochs训练 ##############################'''
def Multiple_training(name: str, data1: dict, data_paths, ddd: dict, abc: list, epochs: int, plpl: dict):
    """Train ``name`` four times on each of the four data folds and record
    the accuracy statistics.

    Side effects:
        plpl[name]: list of [mean, std] per fold (for the bar chart).
        ddd[name]: list of "mean ± std" strings, ONE per fold (for the table).

    Returns:
        The updated ``ddd``.
    """
    CNN_acc_dic = {}
    plpl[name] = []   # four [mean, std] entries, one per fold
    ddd[name] = []

    for data_num in range(4):
        print(f"\n##########################\n模型：{name}, 第{data_num+1}次/共4次\n###########################")

        Model_acc = []

        # Single-branch models
        if name not in ["P2D_GE_Net", "P2D_DSResNet_1", "P2D_DSResNet_V2"]:
            if name == "P2D_DSResNet_CWT":
                train_dl, test_dl = aLL(data1, data_num, is_cwt=0)
            else:
                train_dl, test_dl = aLL(data1, data_num, is_cwt=1)

            for i in range(4):
                print(f"\n第{data_num + 1}中的第{i + 1}次:")
                model, loss_fn = model_de()
                test_acc = train1(name, loss_fn, train_dl, test_dl, model, epochs)
                Model_acc.append(test_acc)

        # Two-branch models (GADF branch + EMTF branch)
        else:
            GADF_train_dl, GADF_test_dl = aLL(data1, data_num, is_cwt=1)
            CWT_train_dl1, CWT_test_dl1 = aLL(data1, data_num, is_cwt=2)

            for i in range(4):
                print(f"\n第{data_num + 1}中的第{i + 1}次:")
                model, loss_fn = model_de()
                test_acc = p2D_train(name, loss_fn,
                                     GADF_train_dl, GADF_test_dl,
                                     CWT_train_dl1, CWT_test_dl1,
                                     model, epochs)
                Model_acc.append(test_acc)

        # ===== statistics over the four runs of this fold =====
        mean = np.mean(Model_acc)
        std_dev = np.std(Model_acc, ddof=1)  # sample standard deviation

        # For the grouped bar chart
        plpl[name].append([mean, std_dev])

        # For quick printing/inspection
        CNN_acc_dic[f'{data_num}'] = [mean, std_dev]

        # BUGFIX: the original looped over all of ``abc`` here, appending the
        # same "mean ± std" string four times per fold (16 entries total), so
        # data_colum() only ever displayed fold 0's result. Append exactly one
        # entry per fold instead.
        print(f"{abc[data_num]}: {mean:.2f} ± {std_dev:.2f}")
        ddd[name].append(f"{mean:.2f} ± {std_dev:.2f}")

    return ddd

'''############################# 主循环 ##############################'''
if __name__ == "__main__":
    # Shared accumulators:
    #   ddd:  model -> list of "mean ± std" strings (summary table)
    #   plpl: model -> list of [mean, std] per fold (grouped bar chart)
    ddd = {}
    plpl = {}
    abc = ['A', 'B', 'C', 'D']  # fold/dataset display labels
    data1, data_paths = Data_loading()

    # Train each model; the integer argument is the epoch count per run.
    # NOTE(review): the commented-out "GNN"/"ProtoNet" names do not match the
    # keys produced by model_de() ('GNNImageClassifier'/'ProtoNetClassifier')
    # and would raise KeyError if re-enabled as-is.
    Multiple_training("P2D_GE_Net", data1, data_paths, ddd, abc, 10, plpl)
    Multiple_training("CNNNet", data1, data_paths, ddd, abc, 3, plpl)
    # Multiple_training("GNN", data1, data_paths, ddd, abc, 5, plpl)
    Multiple_training("DN4Net", data1, data_paths, ddd, abc, 3, plpl)
    # Multiple_training("ProtoNet", data1, data_paths, ddd, abc, 5, plpl)
    Multiple_training("DSResNet_GADF", data1, data_paths, ddd, abc, 3, plpl)
    # Multiple_training("P2D_DSResNet_CWT", data1, data_paths, ddd, abc, 5, plpl)
    # Multiple_training("P2D_DSResNet_1", data1, data_paths, ddd, abc, 20, plpl)
    # Multiple_training("P2D_DSResNet_V2", data1, data_paths, ddd, abc, 20, plpl)
    # Plot the summary table and the grouped accuracy bar chart.
    data_colum(ddd)
    Correctness_Dendrogram(plpl)
    plt.show()

