import os
from sklearn.metrics import confusion_matrix
from counfusion_matrix import print_cm
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import CosineAnnealingLR,StepLR,LambdaLR
import torch
from mobileNet import MobileNetV2
import torch.nn as nn
import numpy as np
import logging
from datetime import datetime
from Imbanlance_Loss import Focal_Loss,DiceLoss,PolyLoss
import sys
import math
from SKNet import  sknet
import torch.optim as optim
from CNN import AudioClassifier,AudioClassifier2
from SKCNN import SKCNNNet
from EffientnetV1 import EfficientNet
from My_Dataloader import  NewDataset,TrianDataset,Dataset2,MyDataset
from torch.utils.data import DataLoader, Dataset
from SE_ResNet import MyModel as SEnet
from SE_ResNet import resblock
from new_Resnet import resnet18,BasicBlock
from EfficientNet import efficientnet_b0
from ViT import VisionTransformer
from ViT_ResNet import VisionTransformer_ResNet
from torch.utils.data import DataLoader, WeightedRandomSampler
# from torch.utils.tensorboard import SummaryWriter
from patient_information import get_locations,cal_patient_acc,single_result,location_result
# import opensmile
# from imblearn.over_sampling import SVMSMOTE,RandomOverSampler
import random
from mobileNetV3 import *
from ghostNet import *
from spafe.features.gfcc import gfcc
from spafe.utils.preprocessing import SlidingWindow
# Fix every RNG seed (PyTorch CPU/GPU, NumPy, Python) so training runs are reproducible.
init_seed = 10
torch.manual_seed(init_seed)
torch.cuda.manual_seed(init_seed)
torch.cuda.manual_seed_all(init_seed)
np.random.seed(init_seed) # seed for NumPy's random numbers
# torch.backends.cudnn.benchmark = False
random.seed(init_seed)
torch.backends.cudnn.deterministic = True


class WarmupLR(optim.lr_scheduler._LRScheduler):
    """Learning-rate schedule: linear warmup, then exponential decay.

    During the first ``warmup_steps`` epochs the learning rate grows
    linearly from ``base_lr / warmup_steps`` up to ``base_lr``; afterwards
    it decays as ``base_lr * exp(-k * gamma)`` where ``k`` counts epochs
    since the warmup ended (starting at 1).

    Args:
        optimizer: wrapped optimizer whose param-group lrs are scheduled.
        warmup_steps: number of epochs of linear warmup.
        gamma: decay coefficient for the exponential phase.
        last_epoch: index of the last finished epoch (-1 = fresh start).
    """

    def __init__(self, optimizer, warmup_steps, gamma, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.gamma = gamma
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        step = self.last_epoch
        if step < self.warmup_steps:
            # Warmup: fraction of the base lr grows by 1/warmup_steps per epoch.
            scale = (step + 1) / self.warmup_steps
        else:
            # Decay: exponential falloff once warmup has completed.
            scale = math.exp(-(step - self.warmup_steps + 1) * self.gamma)
        return [base_lr * scale for base_lr in self.base_lrs]



def print_weights(module, input, output):
    """Forward-hook callback that prints the first parameter of *module*.

    Only the first row/slice of the first parameter tensor is printed —
    a quick sanity check that weights exist (and change across calls).
    The (input, output) hook arguments are ignored.
    """
    print("模型权重：")
    first_param = next(iter(module.parameters()), None)
    if first_param is not None:
        print(first_param[0])
def print_layer_params(model, layer_name):
    """Print the first slice of every parameter whose name contains *layer_name*.

    Debug helper: substring-matches against ``model.named_parameters()``.
    """
    matches = (
        (pname, ptensor)
        for pname, ptensor in model.named_parameters()
        if layer_name in pname
    )
    for pname, ptensor in matches:
        print(f'{pname}: {ptensor[0]}')

if __name__ == "__main__":
    # ---- fold / path selection ----------------------------------------
    fold = 'no_fold'
    data_path = 'data_new'
    # cut_data = '5fold_cut'

    fold_path = os.path.join(data_path,fold)
    cut_data = os.path.join(data_path,fold,"vali_data")
    # fold_path = 'data'
    feat_path = os.path.join(fold_path,'feature')
    label_path = os.path.join(fold_path,'label')

    # Pre-extracted log-gammatone features for the train / validation splits.
    data_train = np.load(feat_path + '/train_loggamma.npy', allow_pickle=True)
    data_vali = np.load(feat_path + '/vali_loggamma.npy', allow_pickle=True)

    label_train = np.load(label_path + '/train_label.npy', allow_pickle=True)
    label_vali = np.load(label_path + '/vali_label.npy', allow_pickle=True)


    #dice_target = nn.functional.one_hot(label_vali.to(torch.int64), 3).float() # convert labels to one-hot encoding
    # sampler = RandomOverSampler()
    # # sampler = SVMSMOTE()
    # data_train, label_train = sampler.fit_resample(data_train, label_train)

    # Per-sample metadata: auscultation location, patient id and segment index.
    # location_train = np.load(label_path + '/train_location.npy', allow_pickle=True)
    location_vali = np.load(label_path + '/vali_location.npy', allow_pickle=True)
    # id_train = np.load(label_path + '/train_id.npy', allow_pickle=True)
    id_vali = np.load(label_path + '/vali_id.npy', allow_pickle=True)
    index_train = np.load(label_path + '/train_index.npy', allow_pickle=True)
    index_vali = np.load(label_path + '/vali_index.npy', allow_pickle=True)

    # Raw waveforms aligned with the feature arrays above.
    train_wavdata = np.load(feat_path + '/train_wavdata.npy', allow_pickle=True)
    vali_wavdata = np.load(feat_path + '/vali_wavdata.npy', allow_pickle=True)

    # stat_feat_path = os.path.join(fold_path, 'statistical_feature')
    # demo_train = np.load(stat_feat_path + '/train_demographic.npy', allow_pickle=True)
    # demo_vali = np.load(stat_feat_path + '/vali_demographic.npy', allow_pickle=True)

    # stat_train = np.load(stat_feat_path + '/train_static.npy', allow_pickle=True)
    # stat_vali = np.load(stat_feat_path + '/vali_static.npy', allow_pickle=True)

    # train_set = NewDataset(wav_label=label_train, wav_data=data_train,wav_index=index_train)
    # vali_set = NewDataset(wav_label=label_vali, wav_data=data_vali, wav_index=index_vali)

    # Each MyDataset item yields a 4-tuple (feature, label, index, waveform) —
    # see the unpacking in the training/eval loops below.
    train_set = MyDataset(wav_label=label_train, wav_data=data_train,wav_index=index_train,wav=train_wavdata)
    vali_set = MyDataset(wav_label=label_vali, wav_data=data_vali, wav_index=index_vali,wav=vali_wavdata)
    # train_set = Dataset2(wav_label=label_train, wav_data=data_train, wav_index=index_train, demographic=demo_train,
    #                     static=stat_train)
    # vali_set = Dataset2(wav_label=label_vali, wav_data=data_vali, wav_index=index_vali, demographic=demo_vali,
    #                     static=stat_vali)

    # Optional down-sampling of the validation set (currently disabled).
    # Absent_index=[]
    # Soft_index =[]
    # Loud_index = []
    # for data, label, index in vali_set:
    #     if label == 0:
    #         Absent_index.append(index)
    #     if label == 1:
    #         Soft_index.append(index)
    #     if label == 2:
    #         Loud_index.append(index)
    # Absent_index = np.array(Absent_index)
    # Soft_index = np.array(Soft_index)
    # Loud_index = np.array(Loud_index)
    # Absent_to_delete = np.random.choice(Absent_index,size=3000, replace=False)
    # Soft_to_delete = np.random.choice(Soft_index, size=0, replace=False)
    # Loud_to_delete = np.random.choice(Loud_index, size=0, replace=False)
    # rows_to_delete = np.concatenate([Absent_to_delete,Soft_to_delete,Loud_to_delete])
    # data_vali = np.delete(data_vali, rows_to_delete, axis=0)
    # label_vali = np.delete(label_vali, rows_to_delete, axis=0)
    # index_vali = np.delete(index_vali, rows_to_delete, axis=0)
    # vali_set = NewDataset(wav_label=label_vali, wav_data=data_vali, wav_index=index_vali)

    # Peek at one feature sample's shape as a sanity check.
    data1=train_set.data[0]
    print(data1.shape)
    num_classes = 3  # classes: 0 = absent, 1 = soft, 2 = loud
    class_count = [0] * num_classes
    for _, label,_,_  in train_set:
        class_count[label] += 1
    print("train_set:",'absent:', class_count[0], 'soft:', class_count[1], 'loud:', class_count[2])
    vali_class_count = [0] * num_classes
    for data, label, index,_  in vali_set:
        vali_class_count[label] += 1
    print("vali_set:",'absent:', vali_class_count[0], 'soft:', vali_class_count[1], 'loud:', vali_class_count[2])

    # Compute a sampling weight per class (majority class gets weight 1).
    # NOTE(review): raises ZeroDivisionError if any class is absent from train_set.
    target_count = max(class_count)
    class_weights = [target_count / count for count in class_count]
    # Build the weighted sampler: one weight per training sample, drawn with replacement.
    weights = [class_weights[label] for _, label,_ ,_ in train_set]
    weighted_sampler = WeightedRandomSampler(weights, len(train_set), replacement=True)


    # ---- hyper-parameters ---------------------------------------------
    train_batch_size = 128
    test_batch_size = 1
    learning_rate = 0.005
    num_epochs =20
    # ViT-only settings (unused by the active MobileNetV3 model below).
    img_size = (32,240)
    patch_size = (8,20)
    encoders = 1
    num_heads = 12
    # ========================/ dataloader /========================== #
    # train_loader = DataLoader(train_set, batch_size=train_batch_size,  shuffle=True, drop_last=True)
    train_loader = DataLoader(train_set, batch_size=train_batch_size, sampler=weighted_sampler,drop_last=True)
    test_loader = DataLoader(vali_set, batch_size=test_batch_size)
    # test_loader = DataLoader(vali_set, batch_size=test_batch_size, sampler=weighted_sampler)
    print("Dataloader is ok")

    # ---- model selection (alternatives left commented for experiments) ----
    # model = SEnet(resblock, 1, 3)
    # model = MobileNetV2()
    # model = efficientnetv2_base()
    # model = sknet(3, [2,2,2,2])
    # model = efficientnetv2_my()
    # model = efficientnetv2_s()
    # model = AudioClassifier()
    model = MobileNetV3()
    # model = GhostNet()
    # model = SKCNNNet()
    # model = efficientnet_b0()
    # model = resnet18()
    model_result_path = os.path.join('MobileNetV3_result', fold_path)
    # model_result_path = os.path.join('SKCNN_result', fold_path,"TV/")
    # model = ResNet18(BasicBlock, num_classes=3)
    # model = VisionTransformer(img_size=img_size,
    #                           patch_size=patch_size,
    #                           in_c=1,
    #                           embed_dim=768,
    #                           depth=encoders,
    #                           num_heads=num_heads,
    #                           num_classes=3
    #                           )

    # model.fc.register_forward_hook(print_weights)
    # model = torch.load('ViT_result/data_5fold/3_fold/model/last_model')
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    model = model.to(device)  # move the model to the selected device
    # optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-7)
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=30)
    # scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,gamma=0.6, last_epoch=-1)
    # Decay lr by 10x at epochs 5, 10, 15 and 20.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [5, 10, 15,20], gamma=0.1)
    # scheduler = StepLR(optimizer, step_size=5, gamma=0.9)  # did not work well
    # scheduler = WarmupLR(optimizer, warmup_steps=5,gamma=0.95)
    # weight = torch.tensor([1.0, 3.0, 2.0]).to(device)
    # criterion = nn.CrossEntropyLoss(weight=weight)
    # criterion = nn.CrossEntropyLoss()

    # Focal loss with uniform class weights to counter class imbalance.
    weight=torch.tensor([1,1,1]).to(device)
    criterion = Focal_Loss(gamma=2.5, weight=weight)
    # criterion = DiceLoss()
    # criterion = PolyLoss()

    # Directories for the best-checkpoint model and result files.
    model_path = os.path.join(model_result_path, "model")
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    result_path = os.path.join(model_result_path, "ResultFile")
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    # Per-epoch histories for the loss/accuracy curves plotted later.
    all_train_loss = []
    all_train_acc = []
    all_val_loss = []
    all_val_acc = []
    # Best-so-far metrics; updated only when a new best model is saved.
    best_val_acc =-np.inf
    best_val_UAR = -np.inf
    best_val_F1 = -np.inf
    best_val_acc_soft = -np.inf
    best_val_soft = -np.inf

    # Averages of PCG metrics over the epochs >= 15 (see training loop).
    aver_PCG_acc = []
    aver_PCG_UAR = []
    aver_PCG_absent = []
    aver_PCG_soft = []
    aver_PCG_loud = []



    best_val_patient_acc = -np.inf
    best_val_loss = 1
    best_train_acc = -np.inf

    # train model
    # NOTE(review): best_epoch / min_loss_epoch are assigned only inside the
    # save branches below (epoch >= 15); the result file write assumes they exist.
    no_better_epoch = 0  # early stopping: counts epochs without improvement
    torch.manual_seed(10)
    for epoch in range(num_epochs):
        # print_layer_params(model, 'fc.weight')
        # ---- training phase ----
        train_loss = 0.0
        train_acc = 0.0
        model.train()
        all_y_pred = []
        for batch_idx, data in enumerate(train_loader):
            # if batch_idx == 30:
            #     print_layer_params(model, 'fc.weight')
            # print_layer_params(model, 'fc.weight')
            # data = (feature, label, dataset index, waveform)
            x, y, z,wav = data
            x = x.to(device)
            y = y.to(device)
            z = z.to(device)
            # wav = wav.to(device)
            # x1 = x1.to(device)  # demographic features
            # x2 = x2.to(device)  # statistical features
            # input2 = torch.cat((x1,x2),dim=1)

            outputs = model(x)
            optimizer.zero_grad()
            loss = criterion(outputs, y.long())
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            _, y_pred = outputs.max(1)
            num_correct = (y_pred == y).sum().item()
            acc = num_correct / train_batch_size
            train_acc += acc
            all_y_pred.append(y_pred.cpu().detach())
        scheduler.step()
        print("第%d个epoch的学习率：%f" % (epoch, optimizer.param_groups[0]['lr']))
        all_train_acc.append(train_acc / len(train_loader))
        all_train_loss.append(train_loss / len(train_loader))



        # ---- evaluation phase ----

        val_loss = 0.0
        val_acc = 0.0

        all_y_pred = []        # predicted class probabilities per 3s segment
        all_y_pred_label = []  # predicted labels per 3s segment
        all_label = []         # ground-truth labels per 3s segment
        all_id = []
        all_location = []
        with torch.no_grad():
            model.eval()
            for i, data in enumerate(test_loader):
                # x, y, z ,x1,x2= data
                x, y, z,wav = data
                x = x.to(device)
                y = y.to(device)
                z = z.to(device)
                # wav = wav.to(device)
                # signal =
                # x1 = x1.to(device)
                # x2 = x2.to(device)
                # input2 = torch.cat((x1,x2),dim=1)

                outputs = model(x)
                loss = criterion(outputs, y.long())
                val_loss += loss.item()
                _, y_pred = outputs.max(1)
                num_correct = (y_pred == y).sum().item()
                acc = num_correct / test_batch_size
                val_acc += acc
                softmax = nn.Softmax(dim = 1)
                all_y_pred.append(softmax(outputs).cpu().detach())
                all_label.append(y.cpu().detach())
                all_y_pred_label.append(y_pred.cpu().detach())
                # Map each sample's dataset index back to patient id / location.
                for ii in range(test_batch_size):
                    all_id.append(id_vali[z[ii].cpu().detach()])
                    all_location.append(location_vali[z[ii].cpu().detach()])

        all_y_pred = np.vstack(all_y_pred)

        all_label = np.hstack(all_label)
        all_y_pred_label = np.hstack(all_y_pred_label)

        all_val_acc.append(val_acc / len(test_loader))
        all_val_loss.append(val_loss / len(test_loader))

        acc_metric = val_acc / len(test_loader)
        loss_metric = val_loss / len(test_loader)

        # Aggregate segment predictions into per-patient accuracy.
        vali_data_directory = os.path.join(cut_data)
        # patient_pre, patient_ture_label, patient_acc  = single_result(vali_data_directory,all_id,all_y_pred)
        patient_pre, patient_ture_label, patient_acc = cal_patient_acc(vali_data_directory, all_id, all_y_pred,
                                                                       all_location)
        print("======================================================================================================================")
        print(
            "Epoch: %d, Train Loss: %.4f, Train Acc: %.4f, Val Loss: %.4f, "
            "Val Acc: %.4f,patient Acc: %.4f "
            % (
                epoch,
                train_loss / len(train_loader),
                train_acc / len(train_loader),
                val_loss / len(test_loader),
                val_acc / len(test_loader),
                patient_acc

            )
        )
        # ---- PCG (segment-level) classification metrics ----
        # Convert predicted / true labels to numpy arrays.
        y_pred = np.array(all_y_pred_label)
        y_true = np.array(all_label)
        # Confusion matrix; rows = true class, columns = predicted class.
        cm = confusion_matrix(y_true, y_pred)
        # Per-class recall and F1.
        Absent_num = np.sum(cm[0])
        Soft_num = np.sum(cm[1])
        Loud_num = np.sum(cm[2])
        Absent_recall = cm[0][0] / Absent_num
        Soft_recall = cm[1][1] / Soft_num
        Loud_recall = cm[2][2] / Loud_num

        PCG_UAR = (Absent_recall+Soft_recall+Loud_recall)/3
        PCG_acc_soft_aver = (acc_metric+Soft_recall)/2  # mean of accuracy and Soft recall
        print("------------------------------PCG result------------------------------")
        print("Absent_recall: %.4f, Soft_recall: %.4f, Loud_recall: %.4f,PCG_UAR: %.4f"
              % (Absent_recall, Soft_recall, Loud_recall,PCG_UAR))
        a = np.sum(cm, 0)  # column sums = predicted counts per class
        Absent_Precision = cm[0][0] / a[0]
        Soft_Precision = cm[1][1] / a[1]
        Loud_Precision = cm[2][2] / a[2]

        Absent_f1 = (2 * Absent_recall * Absent_Precision) / (Absent_recall + Absent_Precision)
        Soft_f1 = (2 * Soft_recall * Soft_Precision) / (Soft_recall + Soft_Precision)
        Loud_f1 = (2 * Loud_recall * Loud_Precision) / (Loud_recall + Loud_Precision)
        PCG_f1 = (Absent_f1+Soft_f1+Loud_f1)/3
        print("Absent_F1: %.4f, Soft_F1: %.4f, Loud_F1: %.4f, PCG_F1: %.4f"
              % (Absent_f1, Soft_f1, Loud_f1,PCG_f1))

        # ---- patient-level classification metrics ----
        # Convert predicted / true labels to numpy arrays.
        y_pred = np.array(patient_pre)
        y_true = np.array(patient_ture_label)
        # Confusion matrix for patient-level predictions.
        cm1 = confusion_matrix(y_true, y_pred)
        # Per-class recall and F1.
        Absent_num = np.sum(cm1[0])
        Soft_num = np.sum(cm1[1])
        Loud_num = np.sum(cm1[2])
        All_num = Absent_num+Soft_num+Loud_num
        Absent_recall_patient = cm1[0][0] / Absent_num
        Soft_recall_patient = cm1[1][1] / Soft_num
        Loud_recall_patient = cm1[2][2] / Loud_num
        Patient_UAR = (Absent_recall_patient+Soft_recall_patient+Loud_recall_patient)/3
        Patient_WAR = (Absent_recall_patient*Absent_num+Soft_recall_patient*Soft_num+Loud_recall_patient*Loud_num)/All_num
        # Patient_WAcc = (Absent_num*cm1[0][0]+Soft_num*cm1[1][1]+Loud_num*cm1[2][2])/All_num
        print("------------------------------Patient result------------------------------")
        print("Absent_recall: %.4f, Soft_recall: %.4f, Loud_recall: %.4f, Patient_UAR: %.4f, Patient_WAR: %.4f"
              % (Absent_recall_patient, Soft_recall_patient, Loud_recall_patient,Patient_UAR,Patient_WAR))
        a = np.sum(cm1, 0)
        Absent_Precision_patient = cm1[0][0] / a[0]
        Soft_Precision_patient = cm1[1][1] / a[1]
        Loud_Precision_patient = cm1[2][2] / a[2]

        Absent_f1_patient = (2 * Absent_recall_patient * Absent_Precision_patient) / (
                    Absent_recall_patient + Absent_Precision_patient)
        Soft_f1_patient = (2 * Soft_recall_patient * Soft_Precision_patient) / (
                    Soft_recall_patient + Soft_Precision_patient)
        Loud_f1_patient = (2 * Loud_recall_patient * Loud_Precision_patient) / (
                    Loud_recall_patient + Loud_Precision_patient)
        Patient_f1 = (Absent_f1_patient + Soft_f1_patient + Loud_f1_patient) / 3
        print("Absent_F1: %.4f, Soft_F1: %.4f, Loud_F1: %.4f,Patient_F1: %.4f"
              % (Absent_f1_patient, Soft_f1_patient, Loud_f1_patient,Patient_f1))







        best_acc_metric = best_val_acc
        best_uar = best_val_UAR

        # Checkpointing is considered only after epoch 15 (past the lr drops).
        if epoch >=15:
            # if acc_metric > best_acc_metric:
            if Patient_f1 > best_val_F1:
                # (AV_ture_label, AV_pre_label,
                #  MV_ture_label, MV_pre_label,
                #  TV_ture_label, TV_pre_label,
                #  PV_ture_label, PV_pre_label) = location_result(all_id,all_y_pred,all_location,all_label)
                # print_cm(AV_ture_label, AV_pre_label,"AV")
                # print_cm(MV_ture_label, MV_pre_label,"MV")
                # print_cm(TV_ture_label, TV_pre_label,"TV")
                # print_cm(PV_ture_label, PV_pre_label,"PV")

            # if PCG_acc_soft_aver > best_val_acc_soft :
            # if Soft_recall  > best_val_soft:
                torch.save(
                    model,
                    os.path.join( model_path,'best_model'),
                )
                print(
                    "Saving best_model model to:",
                    os.path.join(model_path,'best_model'),
                )
                # Snapshot all metrics at the epoch of the new best Patient F1.
                best_train_acc = train_acc / len(train_loader)
                best_val_acc = acc_metric
                best_val_patient_acc = patient_acc
                best_val_UAR = Patient_UAR
                best_val_F1  = Patient_f1
                best_val_acc_soft = PCG_acc_soft_aver
                best_val_soft = Soft_recall
                best_epoch = epoch

                best_Absent_recall = Absent_recall
                best_Soft_recall = Soft_recall
                best_Loud_recall = Loud_recall
                best_Absent_f1 = Absent_f1
                best_Soft_f1 = Soft_f1
                best_Loud_f1 = Loud_f1
                best_PCG_UAR = (Absent_recall + Soft_recall + Loud_recall) / 3
                best_PCG_f1 = PCG_f1
                best_Patient_f1 = Patient_f1


                best_Absent_recall_patient = Absent_recall_patient
                best_Soft_recall_patient = Soft_recall_patient
                best_Loud_recall_patient = Loud_recall_patient
                best_Absent_f1_patient = Absent_f1_patient
                best_Soft_f1_patient = Soft_f1_patient
                best_Loud_f1_patient = Loud_f1_patient
                best_Patient_UAR = (Absent_recall_patient + Soft_recall_patient + Loud_recall_patient) / 3
                best_Patient_WAR = Patient_WAR
                result_path = os.path.join(model_result_path, "ResultFile")
                if not os.path.exists(result_path):
                    os.makedirs(result_path)

                # PCG (segment-level) confusion matrix figure.
                plt.figure()
                plt.imshow(cm, cmap=plt.cm.Blues)
                plt.colorbar()
                # Annotate each cell with its count.
                for i in range(cm.shape[0]):
                    for j in range(cm.shape[1]):
                        plt.text(j, i, cm[i, j], ha='center', va='center')
                plt.xlabel('Predicted labels')
                plt.ylabel('True labels')
                plt.xticks([0, 1, 2], ['absent', 'soft', 'loud'])
                plt.yticks([0, 1, 2], ['absent', 'soft', 'loud'])
                plt.title('Confusion matrix')
                plt.savefig(result_path + '/PCG Confusion matrix.png', dpi=600)
                plt.close()

                # Patient-level confusion matrix figure.

                plt.figure()
                plt.imshow(cm1, cmap=plt.cm.Blues)
                plt.colorbar()
                # Annotate each cell; top-left cell in white for contrast on the dark cell.
                for i in range(cm1.shape[0]):
                    for j in range(cm1.shape[1]):
                        if i == 0 and j == 0:
                            plt.text(j, i, cm1[i, j], color='white', ha='center', va='center')
                        else:
                            plt.text(j, i, cm1[i, j], ha='center', va='center')
                plt.xlabel('Predicted labels')
                plt.ylabel('True labels')
                plt.xticks([0, 1, 2], ['absent', 'soft', 'loud'])
                plt.yticks([0, 1, 2], ['absent', 'soft', 'loud'])
                plt.title('Confusion matrix')
                plt.savefig(result_path + '/patient Confusion matrix.png', dpi=600)
                plt.close()
            else :
                no_better_epoch = no_better_epoch+1

            # Also checkpoint the model with the lowest validation loss.
            if loss_metric < best_val_loss:

                torch.save(model, os.path.join(model_path, "loss_model"))
                print(
                    "Saving loss_model model to:",
                    os.path.join(model_path, "loss_model"),
                )

                min_loss_epoch = epoch
                best_val_loss = loss_metric

            # Accumulate late-epoch metrics for the averages reported at the end.
            aver_PCG_acc.append(acc_metric)
            aver_PCG_UAR.append(PCG_UAR)
            aver_PCG_absent.append(Absent_recall)
            aver_PCG_soft.append(Soft_recall)
            aver_PCG_loud.append(Loud_recall)


        # if no_better_epoch == 15 :
        #     break






    # Always save the model state after the final epoch.
    torch.save(
        model,
        os.path.join(model_path, 'last_model'),
    )

    print(
        "Saving last_model model to:",
        os.path.join(model_path, 'last_model'),
    )








    # === Plot training and validation loss curves ===
    plt.figure()
    plt.plot(all_train_loss, linewidth=1, label='Training Loss')
    plt.plot(all_val_loss, linewidth=1, label='Validation Loss')
    plt.title('Training and Validation Loss', fontsize=18)
    plt.xlabel('Epoch', fontsize=18)
    plt.ylabel('Loss', fontsize=18)
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    plt.legend()

    plt.savefig(result_path+'/Training and Validation Loss.png', dpi=600)
    plt.close()

    # === Plot training and validation accuracy curves ===
    plt.figure()
    plt.plot(all_train_acc, linewidth=1, label='Training Acc')
    plt.plot(all_val_acc, linewidth=1, label='Validation Acc')
    plt.title('Training and Validation Acc', fontsize=18)
    plt.xlabel('Epoch', fontsize=18)
    plt.ylabel('Loss', fontsize=18)  # NOTE(review): y-axis label says 'Loss' on the accuracy plot
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    plt.legend()

    plt.savefig(result_path + '/Training and Validation acc.png', dpi=600)
    plt.close()







# Append the run summary (best metrics, averages) to a text file.
    np_train_acc = np.array(all_train_acc).reshape((len(all_train_acc), 1))  # reshaped so it can be stacked with the other columns
    np_train_loss = np.array(all_train_loss).reshape((len(all_train_loss), 1))
    np_val_acc = np.array(all_val_acc).reshape((len(all_val_acc), 1))  # reshaped so it can be stacked with the other columns
    np_val_loss = np.array(all_val_loss).reshape((len(all_val_loss), 1))
    np_out = np.concatenate([np_train_acc, np_val_acc,np_train_loss,np_val_loss], axis=1)

    f = result_path+"/save_result.txt"
    # if not os.path.isdir(f):
    #     os.makedirs(f)
    mytime = datetime.now()
    # NOTE(review): best_epoch / min_loss_epoch / best_* are only bound if a
    # checkpoint was saved (epoch >= 15 and metric improved) — otherwise this raises NameError.
    with open(f, "a") as file:
        file.write("===============================================================================" + "\n")
        file.write(str(mytime) + "\n")
        # file.write("# encoder layers = " + str(encoders) + "\n")
        # file.write("# img_size = " + str(img_size) + "\n")
        # file.write("# patch size = " + str(patch_size) + "\n")
        # file.write("# num_heads = " + str(num_heads) + "\n")
        file.write("# num_epochs = " + str(num_epochs) + "\n")
        # file.write("# train_batch_size = " + str(train_batch_size) + "\n")
        # file.write("# test_batch_size = " + str(test_batch_size) + "\n")
        file.write("# learning_rate = " + str(learning_rate) + "\n")
        file.write("# best_epoch = " + str(best_epoch) + "\n")
        file.write("# min_loss_epoch = " + str(min_loss_epoch) + "\n")
        file.write("# train_acc = " + str('{:.4f}'.format(best_train_acc)) + "\n")
        file.write("# val_acc = " + str('{:.4f}'.format(best_val_acc)) + "\n")
        file.write("# val_patient_acc = " + str('{:.4f}'.format(best_val_patient_acc)) + "\n")
        file.write("-----------------average_results----------------- " + "\n")
        file.write("Absent: " + str('{:.4f}'.format(sum(aver_PCG_absent)/len(aver_PCG_absent)))
                   + "  Soft: " + str('{:.4f}'.format(sum(aver_PCG_soft)/len(aver_PCG_soft)))
                   + "  Loud: " + str('{:.4f}'.format(sum(aver_PCG_loud)/len(aver_PCG_loud)))
                   +  "  PCG_UAR: " + str('{:.4f}'.format(sum(aver_PCG_UAR)/len(aver_PCG_UAR)))
                   + "  PCG_Acc: " + str('{:.4f}'.format(sum(aver_PCG_acc)/len(aver_PCG_acc)))
                   +"\n")
        file.write("-----------------PCG_vali_recall----------------- " + "\n")
        file.write("Absent: " + str('{:.4f}'.format(best_Absent_recall))
                   + "  Soft: " + str('{:.4f}'.format(best_Soft_recall))
                   + "  Loud: " + str('{:.4f}'.format(best_Loud_recall))
                   +  "  PCG_UAR: " + str('{:.4f}'.format(best_PCG_UAR))
                   +"\n")
        file.write("-------------------PCG_vali_F1------------------- " + "\n")
        file.write("Absent: " + str('{:.4f}'.format(best_Absent_f1))
                   + "  Soft: " + str('{:.4f}'.format(best_Soft_f1))
                   + "  Loud: " + str('{:.4f}'.format(best_Loud_f1))
                   + "\n")
        file.write("-----------------patient_vali_recall----------------- " + "\n")
        file.write("Absent: " + str('{:.4f}'.format(best_Absent_recall_patient))
                   + "  Soft: " + str('{:.4f}'.format(best_Soft_recall_patient))
                   + "  Loud: " + str('{:.4f}'.format(best_Loud_recall_patient))
                   + "  Patient_UAR: " + str('{:.4f}'.format(best_Patient_UAR))
                   + "  Patient_WAR: " + str('{:.4f}'.format(best_Patient_WAR))
                   + "\n")
        file.write("-------------------patient_vali_F1------------------- " + "\n")
        file.write("Absent: " + str('{:.4f}'.format(best_Absent_f1_patient))
                   + "  Soft: " + str('{:.4f}'.format(best_Soft_f1_patient))
                   + "  Loud: " + str('{:.4f}'.format(best_Loud_f1_patient))
                   + "  Average: " + str('{:.4f}'.format(best_Patient_f1))
                   + "\n")
        # file.write('train_acc    val_acc   train_loss    val_loss' + "\n")
        # for i in range(len(np_out)):
        #     file.write(str(np_out[i]) + '\n')
    print("save result successful!!!")



















