import os
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import CosineAnnealingLR
import torch
import torch.nn as nn
import numpy as np
import logging
from datetime import datetime
import sys
from torch.utils.data import DataLoader, Dataset
from SE_ResNet import MyModel as Resnet
from SE_ResNet import resblock
from ViT import VisionTransformer
from ViT_ResNet import VisionTransformer_ResNet
from torch.utils.data import DataLoader, WeightedRandomSampler
from torch.utils.tensorboard import SummaryWriter
from patient_information import find_patient_files,load_patient_data,get_grade_number,get_murmur,get_num_locations,get_locations



class FocalLoss(nn.Module):
    """Multi-class focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    Down-weights well-classified samples so training focuses on hard ones:
    loss_i = (1 - p_i)**gamma * CE_i, averaged over the batch.

    Bug fix: the original computed the *batch-mean* cross-entropy first and
    then applied the (1 - pt)**gamma modulation to that single scalar, so
    every sample in the batch was scaled by the same factor.  Focal loss must
    modulate each sample by its own predicted probability, which requires the
    per-sample CE (``reduction='none'``).
    """

    def __init__(self, gamma=2, weight=None):
        super(FocalLoss, self).__init__()
        self.gamma = gamma    # focusing parameter; gamma=0 reduces to plain CE
        self.weight = weight  # optional per-class weights, as in CrossEntropyLoss

    def forward(self, inputs, targets):
        """Return the scalar focal loss for logits ``inputs`` and int ``targets``."""
        # Per-sample cross-entropy so the modulation below is per-sample.
        ce_loss = nn.CrossEntropyLoss(weight=self.weight, reduction='none')(inputs, targets)
        pt = torch.exp(-ce_loss)  # model's probability for the true class
        focal_loss = (1 - pt) ** self.gamma * ce_loss
        return focal_loss.mean()

class NewDataset(Dataset):
    """Validation dataset yielding (feature, label, sample-index) triples.

    The extra per-sample index lets evaluation code map each segment back to
    its patient id / auscultation location through external lookup arrays.
    """

    def __init__(self, wav_label, wav_data, wav_index):
        self.data = torch.from_numpy(wav_data)    # features (memory-shared with the numpy input)
        self.label = torch.LongTensor(wav_label)  # integer class labels
        self.dex = torch.LongTensor(wav_index)    # original sample indices

    def __getitem__(self, index):
        """Fetch one sample as (float32 features, int32 label, int32 index)."""
        return (
            self.data[index].float(),
            self.label[index].int(),
            self.dex[index].int(),
        )

    def __len__(self):
        """Total number of samples."""
        return self.data.shape[0]

class TrianDataset(Dataset):
    """Training dataset yielding (feature, label) pairs.

    The class name keeps its historical "Trian" spelling because callers
    construct it by this name.
    """

    def __init__(self, wav_label, wav_data):
        self.data = torch.from_numpy(wav_data)    # features (memory-shared with the numpy input)
        self.label = torch.LongTensor(wav_label)  # integer class labels

    def __getitem__(self, index):
        """Fetch one sample as (float32 features, int32 label)."""
        sample = self.data[index].float()
        target = self.label[index].int()
        return sample, target

    def __len__(self):
        """Total number of samples."""
        return self.data.shape[0]

class MyDataset(Dataset):
    """Dataset yielding (feature, label, location, id) per sample.

    ``location`` and ``id`` are per-sample strings; they are encoded on the
    fly as 1-D long tensors of Unicode code points so they can pass through
    a DataLoader.  Because the strings differ in length, the encoded tensors
    are variable-length and the default collate cannot stack them in a batch.
    """

    def __init__(self, wav_label, wav_data, wav_location, wav_id):
        self.data = torch.from_numpy(wav_data)    # features (memory-shared with the numpy input)
        self.label = torch.LongTensor(wav_label)  # integer class labels
        self.location = wav_location              # sequence of auscultation-location strings
        self.id = wav_id                          # sequence of patient-id strings

    @staticmethod
    def _encode(text):
        # Encode a string as a tensor of its characters' code points.
        return torch.tensor([ord(ch) for ch in text], dtype=torch.long)

    def __getitem__(self, index):
        """Fetch one sample as (float32 features, int32 label, location codes, id codes)."""
        return (
            self.data[index].float(),
            self.label[index].int(),
            self._encode(self.location[index]),
            self._encode(self.id[index]),
        )

    def __len__(self):
        """Total number of samples."""
        return self.data.shape[0]
#### ISSUE: location and id strings have different lengths, so the default DataLoader collate cannot stack them within a batch.

# Under-sampling / over-sampling helpers
def undersample_data(data, target_count):
    """Truncate ``data`` to at most ``target_count`` items.

    Returns ``data`` unchanged when it is already short enough; otherwise
    returns the first ``target_count`` items.  No shuffling is performed —
    callers that need a random subsample must shuffle beforehand.
    """
    return data[:target_count] if len(data) > target_count else data


def oversample_data(data, target_count):
    """Repeat a tensor's rows until it has at least ``target_count`` of them.

    The input is cloned first, so the caller's tensor is never mutated.
    If ``data`` already has ``target_count`` or more rows it is returned
    (as a clone) unchanged; otherwise rows are duplicated by repeated
    doubling and the result is truncated to exactly ``target_count``.

    Bug fix: the original looped forever on an empty tensor, because
    concatenating an empty tensor with itself never increases its length.
    An empty input is now returned as-is.
    """
    data = data.clone()
    if len(data) == 0:  # nothing to repeat — avoid an infinite loop
        return data
    while len(data) < target_count:
        data = torch.cat((data, data), dim=0)[:target_count]
    return data


def cal_patient_acc(path,all_id,all_y_pred,all_location):
    """Aggregate per-segment predictions into per-patient grades and score them.

    For each patient file found under ``path``, the class scores of all
    segments belonging to that patient are summed per auscultation location,
    each location is argmax-ed to a grade, and the patient-level grade is
    derived from the per-location grades: all locations grade 0 -> 0, no
    location grades 0 -> the maximum per-location grade, otherwise -> 1.

    Args:
        path: directory containing the patient data files.
        all_id: patient id per evaluated segment (parallel to all_y_pred).
        all_location: auscultation location per evaluated segment.
        all_y_pred: per-segment class scores; each row must be addable to a
            length-3 tensor row.

    Returns:
        (pre_label, ture_label, patient_acc): predicted patient grades, true
        patient grades, and patient-level accuracy.
        NOTE(review): pre_label mixes python ints (0/1 branches) with 0-dim
        tensors (the ``max(...)`` branch) — confirm downstream consumers
        tolerate this.
    """
    patient_files = find_patient_files(path)
    num_patient_files = len(patient_files)
    ture_label = list()  # true grade per patient
    pre_label = list()  # predicted grade per patient
    num_correct_patient = 0
    for i in range(num_patient_files):
        current_patient_data = load_patient_data(patient_files[i])
        num_location = get_num_locations(current_patient_data)
        # Patient id is presumably the first token of the data header — TODO confirm.
        current_ID = current_patient_data.split(" ")[0]
        current_locations = get_locations(current_patient_data)
        # One score accumulator row (3 classes) per auscultation location.
        current_patient_pre = torch.zeros(num_location, 3)
        current_patient_label = get_grade_number(current_patient_data)
        # Sum the predicted scores of every segment of this patient,
        # bucketed by auscultation location.
        for n in range(len(all_id)):
            if all_id[n] == current_ID:
                if all_location[n] in current_locations:
                    j = current_locations.index(all_location[n])
                    current_patient_pre[j] += all_y_pred[n]
        _, current_patient_pre_label = current_patient_pre.max(1)
        # Derive the patient-level label from the per-location labels.
        current_label = 0
        num_zeros = 0
        for n in range(len(current_patient_pre_label)):
            if current_patient_pre_label[n] == 0:
                num_zeros += 1
        if num_zeros == num_location:  # no murmur predicted at any location
            current_label = 0
        elif num_zeros == 0:  # murmur predicted at every location
            current_label = max(current_patient_pre_label)
        else:
            current_label = 1
        if current_label == current_patient_label:
            num_correct_patient += 1
        pre_label.append(current_label)
        ture_label.append(current_patient_label)

    # NOTE(review): raises ZeroDivisionError when ``path`` contains no patients.
    patient_acc = num_correct_patient / num_patient_files
    # print("patient Acc: %.4f" % patient_acc)
    return pre_label, ture_label,patient_acc







if __name__ == "__main__":
    # --- paths for one fold of the 5-fold cross-validation split ---
    fold_path = 'data_5fold_new2/1_fold'
    data_path = os.path.join(fold_path,'logmel')
    label_path = os.path.join(fold_path,'label')

    # Pre-extracted log-mel features and labels; the training files carry an
    # "expand2" suffix (presumably an augmented set — TODO confirm).
    data_train = np.load(data_path + '/train_feature_expand2.npy', allow_pickle=True)
    data_vali = np.load(data_path + '/vali_feature.npy', allow_pickle=True)

    label_train = np.load(label_path + '/train_label_expand2.npy', allow_pickle=True)
    label_vali = np.load(label_path + '/vali_label.npy', allow_pickle=True)

    # Per-segment location / patient-id / index lookups for the validation
    # set; the index returned by NewDataset points into these arrays.
    # location_train = np.load(label_path + '/train_location.npy', allow_pickle=True)
    location_vali = np.load(label_path + '/vali_location.npy', allow_pickle=True)
    # print(location_train[0])
    # id_train = np.load(label_path + '/train_id.npy', allow_pickle=True)
    id_vali = np.load(label_path + '/vali_id.npy', allow_pickle=True)
    # print(id_train[1])
    # index_train = np.load(label_path + '/train_index.npy', allow_pickle=True)
    index_vali = np.load(label_path + '/vali_index.npy', allow_pickle=True)

    train_set = TrianDataset(wav_label=label_train, wav_data=data_train)
    vali_set = NewDataset(wav_label=label_vali, wav_data=data_vali, wav_index=index_vali)
    data1=train_set.data[0]
    print(data1.shape)
    # print(train_set.id[0])
    # print(train_set.location)
    # print(train_set.location[0])
    # print(train_set.label[0])
    # Count samples per class (0 = absent, 1 = soft, 2 = loud).
    num_classes = 3
    class_count = [0] * num_classes
    for data, label in train_set:
        class_count[label] += 1
    print("train_set:",'absent:', class_count[0], 'soft:', class_count[1], 'loud:', class_count[2])
    class_count = [0] * num_classes
    for data, label, index in vali_set:
        class_count[label] += 1
    print("vali_set:",'absent:', class_count[0], 'soft:', class_count[1], 'loud:', class_count[2])

    # Per-class sampling weight = majority-class count / class count.
    # NOTE(review): at this point class_count holds the *validation* counts
    # (it was overwritten by the loop above), yet the weights are applied to
    # the training set — this looks unintended; verify against the data.
    target_count = max(class_count)
    class_weights = [target_count / count for count in class_count]
    # Weighted sampler so each training batch is roughly class-balanced.
    weights = [class_weights[label] for _, label in train_set]
    weighted_sampler = WeightedRandomSampler(weights, len(train_set), replacement=True)


    # --- hyper-parameters (img/patch/encoder settings are only used by the
    # commented-out ViT variants and the result log below) ---
    train_batch_size = 128
    test_batch_size = 128
    learning_rate = 0.001
    num_epochs =80
    img_size = (32,80)
    patch_size=(8,8)
    encoders=5
    num_heads = 12
    # ========================/ dataloader /========================== #
    # train_loader = DataLoader(train_set, batch_size=train_batch_size,  shuffle=True, drop_last=True)
    train_loader = DataLoader(train_set, batch_size=train_batch_size, sampler=weighted_sampler)
    # drop_last=True keeps every validation batch exactly test_batch_size
    # long (the id/location lookup loop in the epoch loop relies on this),
    # at the cost of never evaluating the tail samples.
    test_loader = DataLoader(vali_set, batch_size=test_batch_size, shuffle=True, drop_last=True)
    print("Dataloader is ok")

    # --- model: SE-ResNet with 1 input channel and 3 output classes; the
    # ViT variants below are kept as switchable alternatives ---
    model = Resnet(resblock, 1, 3)
    # model = VisionTransformer(img_size=img_size,
    #                           patch_size=patch_size,
    #                           in_c=1,
    #                           embed_dim=768,
    #                           depth=encoders,
    #                           num_heads=num_heads,
    #                           num_classes=3
    #                           )
    # model = VisionTransformer_ResNet(img_size=img_size,
    #                           patch_size=patch_size,
    #                           in_c=1,
    #                           embed_dim=768,
    #                           depth=encoders,
    #                           num_heads=num_heads,
    #                           num_classes=3
    #                           )
    # model = torch.load('ViT_result/data_5fold/3_fold/model/last_model')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)  # move the model to GPU when available


    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-7)
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,gamma=0.92, last_epoch=-1)
    criterion = nn.CrossEntropyLoss()
    # criterion = FocalLoss(gamma=2, weight=None)

    # Per-epoch histories and running "best so far" metrics.
    all_train_loss = []
    all_train_acc = []
    all_val_loss = []
    all_val_acc = []
    best_val_acc = -np.inf
    best_val_UAR = -np.inf
    best_val_patient_acc = -np.inf
    best_val_loss = 1  # NOTE(review): assumes validation loss starts above 1 — confirm
    best_train_acc = -np.inf
    # train model
    # ------------------------------ training loop ------------------------------
    for epoch in range(num_epochs):
        train_loss = 0.0
        train_acc = 0.0
        model.train()
        all_y_pred = []
        for batch_idx, data in enumerate(train_loader):
            x, y = data
            x = x.to(device)
            y = y.to(device)
            # z = z.to(device)
            optimizer.zero_grad()
            outputs = model(x)
            loss = criterion(outputs, y.long())
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            _, y_pred = outputs.max(1)
            num_correct = (y_pred == y).sum().item()
            # NOTE(review): divides by the nominal batch size; this loader has
            # no drop_last, so a smaller final batch slightly skews accuracy.
            acc = num_correct / train_batch_size
            train_acc += acc
            all_y_pred.append(y_pred.cpu().detach())
        scheduler.step()
        # (prints "learning rate of epoch %d" — runtime string kept as-is)
        print("第%d个epoch的学习率：%f" % (epoch, optimizer.param_groups[0]['lr']))
        all_train_acc.append(train_acc / len(train_loader))
        all_train_loss.append(train_loss / len(train_loader))

        # ------------------------------ validation ------------------------------
        model.eval()
        val_loss = 0.0
        val_acc = 0.0

        all_y_pred = []        # predicted class scores per 3-second segment
        all_y_pred_label = []  # predicted labels per segment
        all_label=[]           # true labels per segment
        all_id = []            # patient id per segment (via index lookup)
        all_location = []      # auscultation location per segment
        with torch.no_grad():
            for i, data in enumerate(test_loader):
                x, y, z = data
                x = x.to(device)
                y = y.to(device)
                z = z.to(device)
                # NOTE(review): zero_grad is a no-op under no_grad; leftover from the train loop.
                optimizer.zero_grad()
                outputs = model(x)
                loss = criterion(outputs, y.long())
                val_loss += loss.item()
                _, y_pred = outputs.max(1)
                num_correct = (y_pred == y).sum().item()
                acc = num_correct / test_batch_size
                val_acc += acc
                all_y_pred.append(outputs.cpu().detach())
                all_label.append(y.cpu().detach())
                all_y_pred_label.append(y_pred.cpu().detach())
                # Map each segment index back to its patient id / location.
                # Safe only because drop_last=True guarantees full batches.
                for ii in range(test_batch_size):
                    all_id.append(id_vali[z[ii].cpu().detach()])
                    all_location.append(location_vali[z[ii].cpu().detach()])

        all_y_pred = np.vstack(all_y_pred)

        all_label = np.hstack(all_label)
        all_y_pred_label = np.hstack(all_y_pred_label)

        all_val_acc.append(val_acc / len(test_loader))
        all_val_loss.append(val_loss / len(test_loader))

        acc_metric = val_acc / len(test_loader)
        loss_metric = val_loss / len(test_loader)

        # Patient-level accuracy: aggregate segment scores per patient.
        vali_data_directory = os.path.join(fold_path,"vali_data")
        patient_pre, patient_ture_label, patient_acc = cal_patient_acc(vali_data_directory, all_id, all_y_pred,
                                                                       all_location)
        print("======================================================================================================================")
        print(
            "Epoch: %d, Train Loss: %.4f, Train Acc: %.4f, Val Loss: %.4f, "
            "Val Acc: %.4f,patient Acc: %.4f "
            % (
                epoch,
                train_loss / len(train_loader),
                train_acc / len(train_loader),
                val_loss / len(test_loader),
                val_acc / len(test_loader),
                patient_acc

            )
        )
        # Segment-level (PCG) classification performance
        # Convert predicted and true labels to numpy arrays
        y_pred = np.array(all_y_pred_label)
        y_true = np.array(all_label)
        # Confusion matrix: rows = true class, columns = predicted class
        cm = confusion_matrix(y_true, y_pred)
        # Per-class recall and F1
        Absent_num = np.sum(cm[0])
        Soft_num = np.sum(cm[1])
        Loud_num = np.sum(cm[2])
        Absent_recall = cm[0][0] / Absent_num
        Soft_recall = cm[1][1] / Soft_num
        Loud_recall = cm[2][2] / Loud_num

        PCG_UAR = (Absent_recall+Soft_recall+Loud_recall)/3
        print("------------------------------PCG result------------------------------")
        print("Absent_recall: %.4f, Soft_recall: %.4f, Loud_recall: %.4f,PCG_UAR: %.4f"
              % (Absent_recall, Soft_recall, Loud_recall,PCG_UAR))
        a = np.sum(cm, 0)  # column sums = number of predictions per class
        Absent_Precision = cm[0][0] / a[0]
        Soft_Precision = cm[1][1] / a[1]
        Loud_Precision = cm[2][2] / a[2]

        Absent_f1 = (2 * Absent_recall * Absent_Precision) / (Absent_recall + Absent_Precision)
        Soft_f1 = (2 * Soft_recall * Soft_Precision) / (Soft_recall + Soft_Precision)
        Loud_f1 = (2 * Loud_recall * Loud_Precision) / (Loud_recall + Loud_Precision)
        print("Absent_F1: %.4f, Soft_F1: %.4f, Loud_F1: %.4f"
              % (Absent_f1, Soft_f1, Loud_f1))

        # Patient-level classification performance
        # Convert predicted and true labels to numpy arrays
        y_pred = np.array(patient_pre)
        y_true = np.array(patient_ture_label)
        # Confusion matrix
        cm1 = confusion_matrix(y_true, y_pred)
        # Per-class recall and F1
        Absent_num = np.sum(cm1[0])
        Soft_num = np.sum(cm1[1])
        Loud_num = np.sum(cm1[2])
        Absent_recall_patient = cm1[0][0] / Absent_num
        Soft_recall_patient = cm1[1][1] / Soft_num
        Loud_recall_patient = cm1[2][2] / Loud_num
        Patient_UAR = (Absent_recall_patient+Soft_recall_patient+Loud_recall_patient)/3
        print("------------------------------Patient result------------------------------")
        print("Absent_recall: %.4f, Soft_recall: %.4f, Loud_recall: %.4f,Patient_UAR: %.4f"
              % (Absent_recall_patient, Soft_recall_patient, Loud_recall_patient,Patient_UAR))
        a = np.sum(cm1, 0)
        Absent_Precision_patient = cm1[0][0] / a[0]
        Soft_Precision_patient = cm1[1][1] / a[1]
        Loud_Precision_patient = cm1[2][2] / a[2]

        Absent_f1_patient = (2 * Absent_recall_patient * Absent_Precision_patient) / (
                    Absent_recall_patient + Absent_Precision_patient)
        Soft_f1_patient = (2 * Soft_recall_patient * Soft_Precision_patient) / (
                    Soft_recall_patient + Soft_Precision_patient)
        Loud_f1_patient = (2 * Loud_recall_patient * Loud_Precision_patient) / (
                    Loud_recall_patient + Loud_Precision_patient)
        print("Absent_F1: %.4f, Soft_F1: %.4f, Loud_F1: %.4f"
              % (Absent_f1_patient, Soft_f1_patient, Loud_f1_patient))






        ViT_result_path = os.path.join('ResNet_result',fold_path)
        # Save the model whenever validation accuracy reaches a new best
        model_path = os.path.join(ViT_result_path, "model")
        if not os.path.exists(model_path):
            os.makedirs(model_path)
        best_acc_metric = best_val_acc
        best_uar = best_val_UAR  # NOTE(review): this snapshot is never used below
        # Only start tracking/saving the best model after a warm-up of 35 epochs.
        if epoch > 35:
            if acc_metric > best_acc_metric:
            # if PCG_UAR > best_val_UAR:
                torch.save(
                    model,
                    os.path.join( model_path,'best_model'),
                )
                print(
                    "Saving best_model model to:",
                    os.path.join(model_path,'best_model'),
                )
                # Snapshot all metrics at the new best epoch.
                best_train_acc = train_acc / len(train_loader)
                best_val_acc = acc_metric
                best_val_patient_acc = patient_acc
                best_val_UAR = PCG_UAR

                best_Absent_recall = Absent_recall
                best_Soft_recall = Soft_recall
                best_Loud_recall = Loud_recall
                best_Absent_f1 = Absent_f1
                best_Soft_f1 = Soft_f1
                best_Loud_f1 = Loud_f1

                best_Absent_recall_patient = Absent_recall_patient
                best_Soft_recall_patient = Soft_recall_patient
                best_Loud_recall_patient = Loud_recall_patient
                best_Absent_f1_patient = Absent_f1_patient
                best_Soft_f1_patient = Soft_f1_patient
                best_Loud_f1_patient = Loud_f1_patient

                result_path = os.path.join(ViT_result_path, "ResultFile")
                if not os.path.exists(result_path):
                    os.makedirs(result_path)

                # PCG (segment-level) confusion-matrix plot
                plt.figure()
                plt.imshow(cm, cmap=plt.cm.Blues)
                plt.colorbar()
                # Annotate each cell with its count
                for i in range(cm.shape[0]):
                    for j in range(cm.shape[1]):
                        plt.text(j, i, cm[i, j], ha='center', va='center')
                plt.xlabel('Predicted labels')
                plt.ylabel('True labels')
                plt.xticks([0, 1, 2], ['absent', 'soft', 'loud'])
                plt.yticks([0, 1, 2], ['absent', 'soft', 'loud'])
                plt.title('Confusion matrix')
                plt.savefig(result_path + '/PCG Confusion matrix.png', dpi=600)
                plt.close()

                # Patient-level confusion-matrix plot

                plt.figure()
                plt.imshow(cm1, cmap=plt.cm.Blues)
                plt.colorbar()
                # Annotate each cell with its count (white text on the dark top-left cell)
                for i in range(cm1.shape[0]):
                    for j in range(cm1.shape[1]):
                        if i == 0 and j == 0:
                            plt.text(j, i, cm1[i, j], color='white', ha='center', va='center')
                        else:
                            plt.text(j, i, cm1[i, j], ha='center', va='center')
                plt.xlabel('Predicted labels')
                plt.ylabel('True labels')
                plt.xticks([0, 1, 2], ['absent', 'soft', 'loud'])
                plt.yticks([0, 1, 2], ['absent', 'soft', 'loud'])
                plt.title('Confusion matrix')
                plt.savefig(result_path + '/patient Confusion matrix.png', dpi=600)
                plt.close()

            # Save the model whenever validation loss reaches a new minimum
            if loss_metric < best_val_loss:

                torch.save(model, os.path.join(model_path, "loss_model"))
                print(
                    "Saving loss_model model to:",
                    os.path.join(model_path, "loss_model"),
                )
                best_val_loss = loss_metric







    # Always save the final-epoch model as well.
    torch.save(
        model,
        os.path.join(model_path, 'last_model'),
    )
    print(
        "Saving last_model model to:",
        os.path.join(model_path, 'last_model'),
    )



    # === training / validation loss curves ===
    plt.figure()
    plt.plot(all_train_loss, linewidth=1, label='Training Loss')
    plt.plot(all_val_loss, linewidth=1, label='Validation Loss')
    plt.title('Training and Validation Loss', fontsize=18)
    plt.xlabel('Epoch', fontsize=18)
    plt.ylabel('Loss', fontsize=18)
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    plt.legend()

    # NOTE(review): result_path (and the best_* metrics written below) are
    # only bound inside the epoch>35 "new best model" branch; if no best
    # model was ever saved, the lines below raise NameError.
    plt.savefig(result_path+'/Training and Validation Loss.png', dpi=600)
    plt.close()

    # === training / validation accuracy curves ===
    plt.figure()
    plt.plot(all_train_acc, linewidth=1, label='Training Acc')
    plt.plot(all_val_acc, linewidth=1, label='Validation Acc')
    plt.title('Training and Validation Acc', fontsize=18)
    plt.xlabel('Epoch', fontsize=18)
    plt.ylabel('Loss', fontsize=18)
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    plt.legend()

    plt.savefig(result_path + '/Training and Validation acc.png', dpi=600)
    plt.close()







# Save the metric histories and best results to a text file
    np_train_acc = np.array(all_train_acc).reshape((len(all_train_acc), 1))  # reshape so histories can be stacked into one matrix
    np_train_loss = np.array(all_train_loss).reshape((len(all_train_loss), 1))
    np_val_acc = np.array(all_val_acc).reshape((len(all_val_acc), 1))  # reshape so histories can be stacked into one matrix
    np_val_loss = np.array(all_val_loss).reshape((len(all_val_loss), 1))
    # NOTE(review): np_out is only consumed by the commented-out writer below.
    np_out = np.concatenate([np_train_acc, np_val_acc,np_train_loss,np_val_loss], axis=1)

    f = result_path+"/save_result.txt"
    # if not os.path.isdir(f):
    #     os.makedirs(f)
    mytime = datetime.now()
    with open(f, "a") as file:
        file.write("===============================================================================" + "\n")
        file.write(str(mytime) + "\n")
        file.write("# encoder layers = " + str(encoders) + "\n")
        file.write("# img_size = " + str(img_size) + "\n")
        file.write("# patch size = " + str(patch_size) + "\n")
        file.write("# num_heads = " + str(num_heads) + "\n")
        file.write("# num_epochs = " + str(num_epochs) + "\n")
        file.write("# train_batch_size = " + str(train_batch_size) + "\n")
        file.write("# test_batch_size = " + str(test_batch_size) + "\n")
        file.write("# learning_rate = " + str(learning_rate) + "\n")
        file.write("# train_acc = " + str('{:.4f}'.format(best_train_acc)) + "\n")
        file.write("# val_acc = " + str('{:.4f}'.format(best_val_acc)) + "\n")
        file.write("# val_patient_acc = " + str('{:.4f}'.format(best_val_patient_acc)) + "\n")
        file.write("-----------------PCG_vali_recall----------------- " + "\n")
        file.write("Absent: " + str('{:.4f}'.format(best_Absent_recall))
                   + "  Soft: " + str('{:.4f}'.format(best_Soft_recall))
                   + "  Loud: " + str('{:.4f}'.format(best_Loud_recall))
                   + "\n")
        file.write("-------------------PCG_vali_F1------------------- " + "\n")
        file.write("Absent: " + str('{:.4f}'.format(best_Absent_f1))
                   + "  Soft: " + str('{:.4f}'.format(best_Soft_f1))
                   + "  Loud: " + str('{:.4f}'.format(best_Loud_f1))
                   + "\n")
        file.write("-----------------patient_vali_recall----------------- " + "\n")
        file.write("Absent: " + str('{:.4f}'.format(best_Absent_recall_patient))
                   + "  Soft: " + str('{:.4f}'.format(best_Soft_recall_patient))
                   + "  Loud: " + str('{:.4f}'.format(best_Loud_recall_patient))
                   + "\n")
        file.write("-------------------patient_vali_F1------------------- " + "\n")
        file.write("Absent: " + str('{:.4f}'.format(best_Absent_f1_patient))
                   + "  Soft: " + str('{:.4f}'.format(best_Soft_f1_patient))
                   + "  Loud: " + str('{:.4f}'.format(best_Loud_f1_patient))
                   + "\n")
        # file.write('train_acc    val_acc   train_loss    val_loss' + "\n")
        # for i in range(len(np_out)):
        #     file.write(str(np_out[i]) + '\n')
    print("save result successful!!!")



















