import os
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import numpy as np
import logging
from datetime import datetime
import sys
from torch.utils.data import DataLoader, Dataset
from SE_ResNet import MyModel as Resnet
from SE_ResNet import resblock
from torch.utils.data import DataLoader, WeightedRandomSampler
from torch.utils.tensorboard import SummaryWriter
from patient_information import find_patient_files,load_patient_data,get_grade_number,get_murmur,get_num_locations,get_locations


# # ========================/ logging init /========================== #
# # ========================/ logging init /========================== #
# def logger_init(log_level=logging.DEBUG,
#                 log_dir='ResultFile',
#                 ):
#     # 指定路径
#     if not os.path.exists(log_dir):
#         os.makedirs(log_dir)
#
#     date = datetime.now()
#     log_path = os.path.join(log_dir, str(date)[:13] + '-' + str(date.minute) + '.log')
#     formatter = '[%(asctime)s - %(levelname)s:] %(message)s'
#     logging.basicConfig(level=log_level,
#                         format=formatter,
#                         datefmt='%Y-%d-%m %H:%M:%S',
#                         handlers=[logging.FileHandler(log_path),
#                                   logging.StreamHandler(sys.stdout)]
#                         )
#
# # ========================/ logging formate /========================== #
# class save_info(object):
#
#     def __init__(self,epoch,train_loss,train_acc,test_loss,test_acc) :
#         self.epoch=epoch
#         self.train_acc = train_acc
#         self.train_loss=train_loss
#         self.test_acc=test_acc
#         self.test_loss=test_loss
#         logging.info(f"epoch: "+str(self.epoch))
#         logging.info(f"train_loss: "+str('{:.3f}'.format(self.train_loss))+"  train_acc: "+str('{:.3f}'.format(self.train_acc)))
#         logging.info(f"vali_loss: "+str('{:.3f}'.format(self.test_loss))+"  vali_acc: "+str('{:.3f}'.format(self.test_acc)))
#         logging.info(f"=========================================")

class NewDataset(Dataset):
    """Dataset over pre-computed features, labels and sample indices.

    Each item is a ``(feature, label, index)`` triple: the feature comes back
    as a float tensor, the label and index as int tensors.  The index maps a
    sample back into the external id/location arrays.
    """

    def __init__(self, wav_label, wav_data, wav_index):
        self.data = torch.from_numpy(wav_data)
        self.label = torch.LongTensor(wav_label)
        self.dex = torch.LongTensor(wav_index)

    def __getitem__(self, index):
        # Fetch the sample, its class label and its original file index.
        return (
            self.data[index].float(),
            self.label[index].int(),
            self.dex[index].int(),
        )

    def __len__(self):
        # Number of samples held by this dataset.
        return self.data.shape[0]

class MyDataset(Dataset):
    """Dataset pairing features/labels with per-sample location and patient id.

    ``__getitem__`` encodes the location and id strings as 1-D LongTensors of
    character code points.  NOTE: those strings vary in length, so the default
    DataLoader collate cannot stack them into a batch.
    """

    def __init__(self, wav_label, wav_data, wav_location, wav_id):
        self.data = torch.from_numpy(wav_data)
        self.label = torch.LongTensor(wav_label)
        self.location = wav_location
        self.id = wav_id

    @staticmethod
    def _encode(text):
        # Turn a string into a LongTensor of its character ordinals.
        return torch.tensor([ord(ch) for ch in text], dtype=torch.long)

    def __getitem__(self, index):
        return (
            self.data[index].float(),
            self.label[index].int(),
            self._encode(self.location[index]),
            self._encode(self.id[index]),
        )

    def __len__(self):
        # Number of samples held by this dataset.
        return self.data.shape[0]
#### Issue: the location and id strings differ in length, so the default DataLoader collate cannot stack them within a batch.

# Under-sampling / over-sampling helper functions
def undersample_data(data, target_count):
    """Cap *data* at ``target_count`` items.

    Returns the first ``target_count`` items when the input is longer,
    otherwise returns the input object unchanged.
    """
    if len(data) <= target_count:
        return data
    return data[:target_count]


def oversample_data(data, target_count):
    """Repeat *data* (a tensor) until it has at least ``target_count`` rows.

    The input is cloned first so the caller's tensor is never mutated.  An
    empty tensor is returned unchanged: the doubling loop below would
    otherwise spin forever, because concatenating empty tensors never grows
    them.

    Args:
        data: tensor whose first dimension is the sample axis.
        target_count: desired minimum number of samples.

    Returns:
        A tensor with exactly ``target_count`` rows when oversampling was
        needed, otherwise the (cloned) input.
    """
    data = data.clone()
    if len(data) == 0:  # guard: empty input previously caused an infinite loop
        return data
    while len(data) < target_count:
        # Double the data and trim so we never overshoot the target.
        data = torch.cat((data, data), dim=0)[:target_count]
    return data


def cal_patient_acc(path,all_id,all_y_pred,all_location):
    """Aggregate per-segment predictions into per-patient murmur grades.

    For each patient file under *path*: sum the predicted class scores of all
    segments recorded at the same auscultation location, let each location
    vote with its argmax class, then combine the votes — all locations absent
    -> 0; murmur at every location -> the loudest predicted grade; otherwise
    -> 1 (soft).

    Args:
        path: directory containing the patient description files.
        all_id: patient ID string for each evaluated segment.
        all_y_pred: per-segment class-score rows (indexable, 3 columns).
        all_location: auscultation location string for each segment.

    Returns:
        (pre_label, ture_label, patient_acc): predicted patient labels, true
        patient labels, and the fraction of correctly classified patients.
    """
    patient_files = find_patient_files(path)
    num_patient_files = len(patient_files)
    ture_label = list()  # true label of each patient ("ture" is a pre-existing typo kept for compatibility)
    pre_label = list()  # predicted label of each patient
    num_correct_patient = 0
    for i in range(num_patient_files):
        current_patient_data = load_patient_data(patient_files[i])
        num_location = get_num_locations(current_patient_data)
        current_ID = current_patient_data.split(" ")[0]  # first token of the file is the patient ID
        current_locations = get_locations(current_patient_data)
        current_patient_pre = torch.zeros(num_location, 3)
        current_patient_label = get_grade_number(current_patient_data)
        # Sum the predicted scores of all segments, per auscultation location,
        # belonging to the current patient ID.
        for n in range(len(all_id)):
            if all_id[n] == current_ID:
                if all_location[n] in current_locations:
                    j = current_locations.index(all_location[n])
                    current_patient_pre[j] += all_y_pred[n]
        _, current_patient_pre_label = current_patient_pre.max(1)
        # Derive the final patient-level label from the per-location votes.
        current_label = 0
        num_zeros = 0
        for n in range(len(current_patient_pre_label)):
            if current_patient_pre_label[n] == 0:
                num_zeros += 1
        if num_zeros == num_location:  # no murmur predicted at any location
            current_label = 0
        elif num_zeros == 0:  # murmur predicted at every location
            current_label = max(current_patient_pre_label)
        else:
            current_label = 1
        if current_label == current_patient_label:
            num_correct_patient += 1
        pre_label.append(current_label)
        ture_label.append(current_patient_label)

    patient_acc = num_correct_patient / num_patient_files
    # print("patient Acc: %.4f" % patient_acc)
    return pre_label, ture_label,patient_acc







if __name__ == "__main__":
    # ----- Load pre-computed log-mel features and per-segment metadata -----
    data_path = 'data/logmel'
    label_path = 'data/label'
    data_train = np.load(data_path + '/train_feature.npy', allow_pickle=True)
    data_vali = np.load(data_path + '/vali_feature.npy', allow_pickle=True)

    label_train = np.load(label_path + '/train_label.npy', allow_pickle=True)
    label_vali = np.load(label_path + '/vali_label.npy', allow_pickle=True)

    # Auscultation location of every segment (used for patient-level fusion).
    location_train = np.load(label_path + '/train_location.npy', allow_pickle=True)
    location_vali = np.load(label_path + '/vali_location.npy', allow_pickle=True)
    # print(location_train[0])
    # Patient ID of every segment.
    id_train = np.load(label_path + '/train_id.npy', allow_pickle=True)
    id_vali = np.load(label_path + '/vali_id.npy', allow_pickle=True)
    # print(id_train[1])
    # Index mapping each segment back into the id/location arrays above.
    index_train = np.load(label_path + '/train_index.npy', allow_pickle=True)
    index_vali = np.load(label_path + '/vali_index.npy', allow_pickle=True)

    train_set = NewDataset(wav_label=label_train, wav_data=data_train, wav_index=index_train)
    test_set = NewDataset(wav_label=label_vali, wav_data=data_vali, wav_index=index_vali)
    data1=train_set.data[0]
    print(data1.shape)
    # print(train_set.id[0])
    # print(train_set.location)
    # print(train_set.location[0])
    # print(train_set.label[0])
    # Count samples per class (0=absent, 1=soft, 2=loud) for each split.
    num_classes = 3
    class_count = [0] * num_classes
    for data, label, index in train_set:
        class_count[label] += 1
    print("train_set:",'absent:', class_count[0], 'soft:', class_count[1], 'loud:', class_count[2])
    class_count = [0] * num_classes
    for data, label, index in test_set:
        class_count[label] += 1
    print("vali_set:",'absent:', class_count[0], 'soft:', class_count[1], 'loud:', class_count[2])

    # Per-class sampling weight = majority-class count / class count.
    # NOTE(review): class_count was just recomputed over the *validation* set,
    # so these weights reflect validation counts, not training counts — confirm intended.
    target_count = max(class_count)
    class_weights = [target_count / count for count in class_count]
    # Weighted sampler (currently unused — the active DataLoader below shuffles instead).
    weights = [class_weights[label] for _, label, _ in train_set]
    weighted_sampler = WeightedRandomSampler(weights, len(train_set), replacement=True)


    train_batch_size = 128
    test_batch_size = 128
    learning_rate = 0.001
    num_epochs = 100
    # ========================/ dataloader /========================== #
    # drop_last=True keeps every batch full; the per-batch accuracy computation
    # in the training loop divides by the fixed batch size and relies on this.
    train_loader = DataLoader(train_set, batch_size=train_batch_size,  shuffle=True, drop_last=True)
    # train_loader = DataLoader(train_set, batch_size=train_batch_size, sampler=weighted_sampler)
    test_loader = DataLoader(test_set, batch_size=test_batch_size, shuffle=True, drop_last=True)
    print("Dataloader is ok")
    # logger_init()
    # logging.info("# train_batch_size = " + str(train_batch_size))
    # logging.info("# test_batch_size = " + str(test_batch_size))
    # logging.info("# learning_rate = " + str(learning_rate))
    # logging.info("# num_epochs = " + str(num_epochs))
    # logging.info("----------------------------------------------------")
    # writer = SummaryWriter(r'./tensorboard/' + str(datetime.now())[:13])

    # # 查看输入数据大小
    # for data, label, index in train_loader:
    #     print('!')
    #     print(index)
    #     print(data.shape)
    #     print(label)
    #     print(id_train[index])
    #     print(location_train[index])
    #     model = Resnet(resblock, 1, 3)
    #     outputs = model(data)
    #     print(outputs.shape)
    #     _,pred2 = outputs.max(1)
    #     print(pred2)
    #     _, y_pred = outputs.max(1)
    #     num_correct = (y_pred == label).sum().item()
    #     acc = num_correct / data.shape[0]
    #     print(acc)
    #     exit()
    model = Resnet(resblock, 1, 3)  # SE-ResNet: 1 input channel, 3 output classes
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)  # move the model to GPU when available
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-7)
    criterion = nn.CrossEntropyLoss()

    # Per-epoch history plus best-so-far trackers used for checkpointing.
    all_train_loss = []
    all_train_acc = []
    all_val_loss = []
    all_val_acc = []
    best_val_acc = -np.inf
    best_val_patient_acc = -np.inf
    best_val_loss = 1  # NOTE(review): starts at 1 rather than +inf — epochs whose val loss stays >= 1 never trigger the loss checkpoint; confirm intended
    best_train_acc = -np.inf
    # ----- Train / evaluate for num_epochs -----
    for epoch in range(num_epochs):
        train_loss = 0.0
        train_acc = 0.0
        model.train()
        all_y_pred = []
        for batch_idx, data in enumerate(train_loader):
            x, y, z = data  # features, labels, sample indices
            x = x.to(device)
            y = y.to(device)
            z = z.to(device)
            optimizer.zero_grad()
            outputs = model(x)
            loss = criterion(outputs, y.long())
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            _, y_pred = outputs.max(1)
            num_correct = (y_pred == y).sum().item()
            acc = num_correct / train_batch_size  # batch accuracy; valid because drop_last=True keeps batches full
            train_acc += acc
            all_y_pred.append(y_pred.cpu().detach())

        all_train_acc.append(train_acc / len(train_loader))
        all_train_loss.append(train_loss / len(train_loader))

        # evaluate model
        model.eval()
        val_loss = 0.0
        val_acc = 0.0

        all_y_pred = []        # raw output scores of the 3-second samples
        all_y_pred_label = [] # predicted labels of the 3-second samples
        all_label=[]          # true labels of the 3-second samples
        all_id = []
        all_location = []
        with torch.no_grad():
            for i, data in enumerate(test_loader):
                x, y, z = data
                x = x.to(device)
                y = y.to(device)
                z = z.to(device)
                optimizer.zero_grad()  # NOTE(review): no-op inside no_grad evaluation; harmless
                outputs = model(x)
                loss = criterion(outputs, y.long())
                val_loss += loss.item()
                _, y_pred = outputs.max(1)
                num_correct = (y_pred == y).sum().item()
                acc = num_correct / test_batch_size
                val_acc += acc
                all_y_pred.append(outputs.cpu().detach())
                all_label.append(y.cpu().detach())
                all_y_pred_label.append(y_pred.cpu().detach())
                # Map each sample index back to its patient id / location for
                # the patient-level fusion below.
                for ii in range(test_batch_size):
                    all_id.append(id_vali[z[ii].cpu().detach()])
                    all_location.append(location_vali[z[ii].cpu().detach()])

        all_y_pred = np.vstack(all_y_pred)  # (num_samples, 3) score matrix

        all_label = np.hstack(all_label)
        all_y_pred_label = np.hstack(all_y_pred_label)

        all_val_acc.append(val_acc / len(test_loader))
        all_val_loss.append(val_loss / len(test_loader))

        acc_metric = val_acc / len(test_loader)
        loss_metric = val_loss / len(test_loader)

        # Patient-level accuracy: fuse the per-segment scores per patient.
        vali_data_directory = "data/stratified_data_new/vali_data"
        patient_pre, patient_ture_label, patient_acc = cal_patient_acc(vali_data_directory, all_id, all_y_pred,
                                                                       all_location)
        print("======================================================================================================================")
        print(
            "Epoch: %d, Train Loss: %.4f, Train Acc: %.4f, Val Loss: %.4f, "
            "Val Acc: %.4f,patient Acc: %.4f "
            % (
                epoch,
                train_loss / len(train_loader),
                train_acc / len(train_loader),
                val_loss / len(test_loader),
                val_acc / len(test_loader),
                patient_acc

            )
        )

        # Save a checkpoint whenever the validation loss reaches a new minimum.
        # NOTE(review): the 'model' directory is only created further below
        # (before the best-accuracy save), so this save fails if 'model/' does
        # not already exist on the first qualifying epoch — confirm.
        if loss_metric < best_val_loss:
            model_path = "loss_model"
            torch.save(model, os.path.join('model/', model_path))
            print(
                "Saving loss_model model to:",
                os.path.join('model/', model_path),
            )
            best_val_loss = loss_metric




        # Save a checkpoint whenever the validation accuracy reaches a new maximum.
        best_acc_metric = best_val_acc
        model_dir='model'
        if not os.path.isdir(model_dir):
            os.makedirs(model_dir)
        if acc_metric > best_acc_metric:
            model_path = "best_model"

            torch.save(
                model,
                os.path.join('model/', model_path),
            )
            print(
                "Saving best_model model to:",
                os.path.join('model/', model_path),
            )
            best_train_acc = train_acc
            best_val_acc = acc_metric
            best_val_patient_acc = patient_acc
            # PCG (segment-level) confusion matrix.
            # Convert predicted and true labels to numpy arrays.
            y_pred = np.array(all_y_pred_label)
            y_true = np.array(all_label)
            # Compute the confusion matrix (rows = true, columns = predicted).
            cm = confusion_matrix(y_true, y_pred)
            # Plot it.
            plt.figure()
            plt.imshow(cm, cmap=plt.cm.Blues)
            plt.colorbar()
            # Annotate every cell with its count.
            for i in range(cm.shape[0]):
                for j in range(cm.shape[1]):
                    plt.text(j, i, cm[i, j], ha='center', va='center')
            plt.xlabel('Predicted labels')
            plt.ylabel('True labels')
            plt.xticks([0, 1, 2], ['absent', 'soft', 'loud'])
            plt.yticks([0, 1, 2], ['absent', 'soft', 'loud'])
            plt.title('Confusion matrix')
            plt.savefig('ResultFile/PCG Confusion matrix.png', dpi=600)
            plt.close()
            # Per-class recall and F1 derived from the confusion matrix.
            Absent_num = np.sum(cm[0])
            Soft_num = np.sum(cm[1])
            Loud_num = np.sum(cm[2])
            Absent_recall = cm[0][0] / Absent_num
            Soft_recall = cm[1][1] / Soft_num
            Loud_recall = cm[2][2] / Loud_num
            print("------------------------------PCG result------------------------------" )
            print("Absent_recall: %.4f, Soft_recall: %.4f, Loud_recall: %.4f"
                  % (Absent_recall, Soft_recall, Loud_recall))
            a = np.sum(cm, 0)  # column sums = predicted-class totals (precision denominators)
            Absent_Precision = cm[0][0] / a[0]
            Soft_Precision = cm[1][1] / a[1]
            Loud_Precision = cm[2][2] / a[2]

            Absent_f1 = (2 * Absent_recall * Absent_Precision) / (Absent_recall + Absent_Precision)
            Soft_f1 = (2 * Soft_recall * Soft_Precision) / (Soft_recall + Soft_Precision)
            Louf_f1 = (2 * Loud_recall * Loud_Precision) / (Loud_recall + Loud_Precision)  # NOTE(review): "Louf_f1" is a pre-existing typo, used consistently below
            print("Absent_F1: %.4f, Soft_F1: %.4f, Loud_F1: %.4f"
                  % (Absent_f1, Soft_f1, Louf_f1))


            # Patient-level confusion matrix.
            # Convert predicted and true labels to numpy arrays.
            y_pred = np.array(patient_pre)
            y_true = np.array(patient_ture_label)
            # Compute the confusion matrix (rows = true, columns = predicted).
            cm = confusion_matrix(y_true, y_pred)
            # Plot it.
            plt.figure()
            plt.imshow(cm, cmap=plt.cm.Blues)
            plt.colorbar()
            # Annotate every cell with its count.
            for i in range(cm.shape[0]):
                for j in range(cm.shape[1]):
                    plt.text(j, i, cm[i, j], ha='center', va='center')
            plt.xlabel('Predicted labels')
            plt.ylabel('True labels')
            plt.xticks([0, 1, 2], ['absent', 'soft', 'loud'])
            plt.yticks([0, 1, 2], ['absent', 'soft', 'loud'])
            plt.title('Confusion matrix')
            plt.savefig('ResultFile/patient Confusion matrix.png', dpi=600)
            plt.close()
            # Per-class recall and F1 at the patient level.
            # NOTE(review): these assignments overwrite the segment-level
            # metric variables above, so the values later written to
            # save_result.txt are the patient-level ones — confirm intended.
            Absent_num = np.sum(cm[0])
            Soft_num = np.sum(cm[1])
            Loud_num = np.sum(cm[2])
            Absent_recall = cm[0][0] / Absent_num
            Soft_recall = cm[1][1] / Soft_num
            Loud_recall = cm[2][2] / Loud_num
            print("------------------------------Patient result------------------------------")
            print("Absent_recall: %.4f, Soft_recall: %.4f, Loud_recall: %.4f"
                  % (Absent_recall, Soft_recall, Loud_recall))
            a = np.sum(cm, 0)  # column sums = predicted-class totals
            Absent_Precision = cm[0][0] / a[0]
            Soft_Precision = cm[1][1] / a[1]
            Loud_Precision = cm[2][2] / a[2]

            Absent_f1 = (2 * Absent_recall * Absent_Precision) / (Absent_recall + Absent_Precision)
            Soft_f1 = (2 * Soft_recall * Soft_Precision) / (Soft_recall + Soft_Precision)
            Loud_f1 = (2 * Loud_recall * Loud_Precision) / (Loud_recall + Loud_Precision)
            print("Absent_F1: %.4f, Soft_F1: %.4f, Loud_F1: %.4f"
                  % (Absent_f1, Soft_f1, Loud_f1))




        # save = save_info(epoch, train_loss / len(train_loader), train_acc / len(train_loader),
        #                  val_loss / len(test_loader), val_acc / len(test_loader))
##






    # === Plot the training and validation loss curves ===
    plt.figure()
    plt.plot(all_train_loss, linewidth=1, label='Training Loss')
    plt.plot(all_val_loss, linewidth=1, label='Validation Loss')
    plt.title('Training and Validation Loss', fontsize=18)
    plt.xlabel('Epoch', fontsize=18)
    plt.ylabel('Loss', fontsize=18)
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    plt.legend()
    plt.savefig('ResultFile/Training and Validation Loss.png', dpi=600)
    plt.close()







# Append the run configuration and best metrics to a text file.
    np_train_acc = np.array(all_train_acc).reshape((len(all_train_acc), 1))  # reshape so the history columns can be stacked into one matrix
    np_train_loss = np.array(all_train_loss).reshape((len(all_train_loss), 1))
    np_val_acc = np.array(all_val_acc).reshape((len(all_val_acc), 1))  # reshape so the history columns can be stacked into one matrix
    np_val_loss = np.array(all_val_loss).reshape((len(all_val_loss), 1))
    np_out = np.concatenate([np_train_acc, np_val_acc,np_train_loss,np_val_loss], axis=1)  # only consumed by the commented-out dump below
    f = "ResultFile/save_result.txt"
    mytime = datetime.now()
    with open(f, "a") as file:
        file.write("===============================================================================" + "\n")
        file.write(str(mytime) + "\n")
        file.write("# num_epochs = " + str(num_epochs) + "\n")
        file.write("# train_batch_size = " + str(train_batch_size) + "\n")
        file.write("# test_batch_size = " + str(test_batch_size) + "\n")
        file.write("# learning_rate = " + str(learning_rate) + "\n")
        file.write("# val_acc = " + str('{:.4f}'.format(best_val_acc)) + "\n")
        file.write("# val_patient_acc = " + str('{:.4f}'.format(best_val_patient_acc)) + "\n")
        file.write("-----------------vali_recall----------------- " + "\n")
        # NOTE(review): these recall/F1 values are whatever was last assigned
        # in the best-accuracy branch — the *patient-level* metrics of the last
        # improving epoch, since that block overwrites the PCG metrics. A
        # NameError occurs here if the branch never ran — confirm intended.
        file.write("Absent: " + str('{:.4f}'.format(Absent_recall))
                   + "  Soft: " + str('{:.4f}'.format(Soft_recall))
                   + "  Loud: " + str('{:.4f}'.format(Loud_recall))
                   + "\n")
        file.write("-------------------vali_F1------------------- " + "\n")
        file.write("Absent: " + str('{:.4f}'.format(Absent_f1))
                   + "  Soft: " + str('{:.4f}'.format(Soft_f1))
                   + "  Loud: " + str('{:.4f}'.format(Loud_f1))
                   + "\n")
        # file.write('train_acc    val_acc   train_loss    val_loss' + "\n")
        # for i in range(len(np_out)):
        #     file.write(str(np_out[i]) + '\n')
    print("save loss successful!!!")



















