# -*- coding:utf-8 -*-#
# @Time:2023/7/11 14:20
# @Author:Adong
# @Software:PyCharm

"""
Filename:
Author:
Contact:
"""

from os import listdir, makedirs, mkdir, path, sep

import matplotlib.pyplot as plt
from librosa import load as load_
from numpy import argmax, bincount, linspace
from PIL.Image import fromarray, merge
from PIL.Image import open as _open
from sklearn.preprocessing import MinMaxScaler
from torch import cuda, eye
from torch import load as _load
from torch import max as _max
from torch import nn, no_grad
from torch import save as _save
from torch import stack
from torch import sum as _sum
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.models import ResNet50_Weights, resnet50
from tqdm import tqdm


def buildcase():
    """
    Create the output directory tree used by the pipeline:
    result_img/, save_model/, and wav2gary/{train,test,veri}/<fault_type>/.

    Fix over the original: each sub-folder is now created whenever it is
    missing, instead of only when the top-level 'wav2gary' folder was absent
    (previously, deleting a single class folder left the tree broken).
    :return: None
    """
    _type = ['normal', 'zlpc', 'zgz', 'zjsd', 'jbfd']

    def _ensure(dir_path):
        # Create a single directory (parents must already exist) and log it.
        if not path.exists(dir_path):
            mkdir(dir_path)
            print(f"{dir_path}文件夹已创建...")

    _ensure('result_img')
    _ensure('save_model')
    _ensure('wav2gary')
    # NOTE(review): __main__ writes images under 'gray/...', not 'wav2gary/...'
    # — confirm which tree is the current one.
    for split in ('wav2gary/train/', 'wav2gary/test/', 'wav2gary/veri'):
        _ensure(split)
    for _ in _type:
        _ensure('wav2gary/train/' + _)
        _ensure('wav2gary/test/' + _)
        _ensure('wav2gary/veri/' + _)


def transform_to_gray(wav_path, win_length, stride, save_case):
    """
    Slice the time-domain signal of a wav file into windows and save each
    window as a 64x64 three-channel grayscale image under save_case.

    Bug fix: the original used `linspace(0, len(seq) - win_length, stride)`,
    which interprets `stride` as the NUMBER of start points (producing
    `stride` heavily overlapping windows), not as the hop size the docstring
    promises. `range(..., stride)` steps by the hop size as intended.

    :param wav_path: path to the wav file; its parent folder name is used as the class label
    :param win_length: window length in samples (must be 4096 so it reshapes to 64x64)
    :param stride: hop size in samples between consecutive windows
    :param save_case: directory the images are written to (must exist)
    :return: None
    """
    seq = load_(wav_path)[0]
    # Hoisted out of the loop: scaler construction, existing-image count and
    # the class label are loop-invariant.
    normalize_tool = MinMaxScaler(feature_range=(0, 255))  # min-max scale to (0, 255)
    img_count = len(listdir(save_case))  # continue numbering after existing images
    fault_type = path.normpath(wav_path).split(sep)[-2]  # parent folder = fault class
    for start in range(0, len(seq) - win_length + 1, stride):
        piece = seq[start:start + win_length]
        pic = piece.reshape(64, 64)  # assumes win_length == 4096
        norm_piece = normalize_tool.fit_transform(pic)

        gray = fromarray(norm_piece).convert('L')
        img = merge('RGB', (gray, gray, gray))  # replicate the gray band into 3 channels
        img_count += 1
        save_path = path.join(save_case, fault_type + '_' + str(img_count) + '.png')
        img.save(save_path)  # write the grayscale image


class DataSet(Dataset):
    """
    Image dataset rooted at case_path: one sub-folder per fault class,
    each containing the PNG images for that class.
    """

    def __init__(self, case_path):
        # Sub-folder names double as the class labels.
        self.fault_type = listdir(case_path)
        # Collect every image path under every class folder.
        self.allimg_path = [
            path.normpath(path.join(case_path, cls, fname))
            for cls in self.fault_type
            for fname in listdir(path.join(case_path, cls))
        ]
        # Only transform needed: PIL image -> float tensor.
        self.data_transform = transforms.Compose([transforms.ToTensor()])

    def __getitem__(self, idx):
        # Load one image and derive its label from the parent folder name.
        item_path = self.allimg_path[idx]
        tensor = self.data_transform(_open(item_path))
        class_name = item_path.split(sep)[-2]
        return tensor, self.fault_type.index(class_name)

    def __len__(self):
        # Dataset size = number of collected image paths.
        return len(self.allimg_path)


class ResNet50(nn.Module):
    """
    Pretrained ResNet-50 backbone followed by ReLU and a single linear head.
    """

    def __init__(self, output_size):
        """
        :param output_size: number of output classes of the final head
        """
        super().__init__()
        self.resnet50 = resnet50(weights=ResNet50_Weights.DEFAULT)
        self.relu = nn.ReLU()
        self.f_1 = nn.Linear(
            in_features=self.resnet50.fc.out_features,
            out_features=output_size,
        )

    def forward(self, _x):
        """
        Forward pass: backbone -> ReLU -> linear head.
        :param _x: input batch
        :return: raw output logits
        """
        return self.f_1(self.relu(self.resnet50(_x)))


class ResNet50_NCL(nn.Module):
    """
    Pretrained ResNet-50 backbone feeding five parallel linear heads
    (the NCL ensemble); forward returns the five head outputs.
    """

    def __init__(self, output_size):
        """
        :param output_size: number of output classes of each head
        """
        super(ResNet50_NCL, self).__init__()
        self.resnet50 = resnet50(weights=ResNet50_Weights.DEFAULT)
        self.relu = nn.ReLU()
        n_features = self.resnet50.fc.out_features
        # Five independent heads of identical shape.
        self.f1 = nn.Linear(in_features=n_features, out_features=output_size)
        self.f2 = nn.Linear(in_features=n_features, out_features=output_size)
        self.f3 = nn.Linear(in_features=n_features, out_features=output_size)
        self.f4 = nn.Linear(in_features=n_features, out_features=output_size)
        self.f5 = nn.Linear(in_features=n_features, out_features=output_size)

    def forward(self, x):
        """
        Forward pass: backbone -> ReLU -> five heads.
        :param x: input batch
        :return: 5-tuple of per-head logits
        """
        features = self.relu(self.resnet50(x))
        heads = (self.f1, self.f2, self.f3, self.f4, self.f5)
        return tuple(head(features) for head in heads)


class Trainer:
    """
    Training driver for the ResNet50_NCL network: owns the model, the loss
    functions, the optimizer/scheduler, the train/test dataloaders, and the
    epoch loop with logging, best-checkpoint saving and curve plotting.
    """

    def __init__(self, vision, train_dataset, test_dataset):
        """
        :param vision: version tag used in saved-model / plot filenames
        :param train_dataset: root folder of the training images
        :param test_dataset: root folder of the test images
        """
        self.vision = vision  # version tag ("vision" is presumably a typo for "version")
        self.device = "cuda" if cuda.is_available() else 'cpu'  # pick the training device
        print("training by " + self.device + "!")

        self.model = ResNet50_NCL(output_size=5).to(self.device)  # build the network
        self.CE = nn.CrossEntropyLoss()  # cross-entropy loss (used as the test metric)
        self.MSE = nn.MSELoss(reduction='sum')  # summed MSE (terms of the NCL training loss)
        self.optimizer = SGD(self.model.parameters(), lr=1e-3, momentum=0.9)  # SGD optimizer
        # NOTE(review): lr_scheduler.step() is never called anywhere in this
        # class, so the learning rate never actually decays — confirm intent.
        self.lr_scheduler = StepLR(optimizer=self.optimizer, step_size=10, gamma=0.1)
        self.train_dataset = DataSet(case_path=train_dataset)
        self.test_dataset = DataSet(case_path=test_dataset)
        self.batch_size = 32
        self.train_dataloader = DataLoader(self.train_dataset, self.batch_size, shuffle=True)
        self.test_dataloader = DataLoader(self.test_dataset, self.batch_size, shuffle=True)
        self.epoch = None  # current 1-based epoch number, set by start_running

    @classmethod
    def one_hot(cls, x, class_count):
        """
        One-hot encode integer labels by indexing rows of an identity matrix:
        build a [class_count, class_count] identity and keep the rows selected
        by the labels.
        :param x: tensor of integer class indices
        :param class_count: number of classes
        :return: float tensor of shape [len(x), class_count]
        """
        return eye(class_count)[x, :]

    def train(self):
        """
        Run one training epoch with the NCL (negative correlation learning)
        objective over the five ensemble heads.
        :return: (batch-average accuracy, batch-average loss) for the epoch
        """
        model = self.model
        device = self.device
        dataloader = self.train_dataloader
        optimizer = self.optimizer

        model.train()  # switch the model to training mode

        cum_loss = 0  # cumulative loss over processed batches
        cum_acc = 0.0  # cumulative accuracy over processed batches
        n = 0  # number of processed batches
        t = tqdm(dataloader, desc=f'[train]epoch:{self.epoch}')
        for X, y in t:
            X = X.to(device)  # move inputs to the device
            y = y.to(device)  # move labels to the device

            '''ResNet50_NCL'''
            o1, o2, o3, o4, o5 = model(X)
            output = (o1 + o2 + o3 + o4 + o5) / 5  # ensemble (mean) output of the five heads
            lamb = 0.4  # NCL penalty weight lambda
            # Diversity term p: negative spread of each head around the
            # ensemble mean (maximizing spread decorrelates the heads).
            p = -(self.MSE(o1, output) +
                  self.MSE(o2, output) +
                  self.MSE(o3, output) +
                  self.MSE(o4, output) +
                  self.MSE(o5, output))
            y_p = self.one_hot(y.cpu(), len(o1[0])).to(device)
            # Error term e: each head's summed MSE against the one-hot targets.
            e = (self.MSE(o1, y_p) +
                 self.MSE(o2, y_p) +
                 self.MSE(o3, y_p) +
                 self.MSE(o4, y_p) +
                 self.MSE(o5, y_p))
            cur_loss = e + lamb * p  # total NCL loss
            max_probability, pred_label = _max(output, dim=1)
            cur_acc = _sum(y.eq(pred_label)) / output.shape[0]

            '''权重调整部分'''
            optimizer.zero_grad()  # clear accumulated gradients
            cur_loss.backward()  # backpropagate to compute parameter gradients
            optimizer.step()  # apply one SGD update

            cum_loss += cur_loss.item()  # accumulate the loss
            cum_acc += cur_acc.item()  # accumulate the accuracy
            n = n + 1  # count processed batches
            t.set_postfix({'批平均训练误差': cum_loss / n, '批平均训练准确率': cum_acc / n})
        return cum_acc / n, cum_loss / n  # batch-average accuracy and loss

    def test(self):
        """
        Evaluate the model on the test set; no gradients, no weight updates.
        :return: (batch-average accuracy, batch-average loss)
        """
        model = self.model
        device = self.device
        dataloader = self.test_dataloader

        model.eval()  # evaluation mode: parameters are not updated in this phase
        cum_loss = 0  # cumulative loss over processed batches
        cum_acc = 0.0  # cumulative accuracy over processed batches
        n = 0  # number of processed batches

        with no_grad():  # disable gradient tracking
            t = tqdm(dataloader, desc=f'[test]epoch:{self.epoch}')
            for X, y in t:
                X, y = X.to(device), y.to(device)

                '''ResNet50_NCL'''
                o1, o2, o3, o4, o5 = model(X)
                output = (o1 + o2 + o3 + o4 + o5) / 5  # ensemble (mean) output of the five heads
                cur_loss = self.CE(output, y)

                max_p, pred_label = _max(output, dim=1)  # per-row max value and its index (predicted class)
                cur_acc = _sum(y.eq(pred_label)) / output.shape[0]
                cum_loss += cur_loss.item()
                cum_acc += cur_acc.item()
                n = n + 1
                t.set_postfix({'批平均测试误差': cum_loss / n, '批平均测试准确率': cum_acc / n})
            return cum_acc / n, cum_loss / n  # batch-average accuracy and loss

    def start_running(self, epoch):
        """
        Train for `epoch` epochs: per epoch run train + test, append metrics,
        log them to a diary file, checkpoint the best-so-far model, then plot
        the four metric curves.
        :param epoch: number of epochs to run
        """
        tal = []  # train_average_loss per epoch
        val = []  # val_average_loss per epoch
        taa = []  # train_average_acc per epoch
        vaa = []  # val_average_acc per epoch

        model = self.model
        min_acc = 0  # best test accuracy seen so far (despite the "min" name)
        for e in range(epoch):  # main epoch loop
            self.epoch = e + 1  # 1-based epoch number for the progress bars
            acc1, loss1 = self.train()  # train one epoch
            acc2, loss2 = self.test()  # evaluate

            '''记录'''
            tal.append(loss1)
            taa.append(acc1)
            val.append(loss2)
            vaa.append(acc2)

            '''训练日志'''
            with open('ResNet train diary.txt', 'a+') as f:
                f.write(f'epoch{e + 1} - '
                        f'批平均训练误差：{loss1},'
                        f'批平均测试误差：{loss2},'
                        f'批平均训练准确率：{acc1},'
                        f'批平均测试准确率：{acc2}\n')

            if acc2 > min_acc:  # save a checkpoint whenever test accuracy improves
                min_acc = acc2  # update the best accuracy
                _save(model.state_dict(), 'save_model/ResNet_' + self.vision + '.pth')  # save the weights
                print("Current best model has been saved!")
        print('training finished!')
        self.draw(tal, val, taa, vaa)

    def draw(self, tal, val, taa, vaa):
        """
        Plot the four metric curves (train/val loss and accuracy) in a 2x2
        grid and save the figure under result_img/.
        :param tal: train average loss per epoch
        :param val: validation average loss per epoch
        :param taa: train average accuracy per epoch
        :param vaa: validation average accuracy per epoch
        """
        fig = plt.figure()  # create the figure
        plt.suptitle('ResNet_' + self.vision)  # figure title

        '''生成子图'''
        ax1 = plt.subplot(221)
        ax2 = plt.subplot(222)
        ax3 = plt.subplot(223)
        ax4 = plt.subplot(224)

        '''设置子图标题'''
        ax1.set_title('train_Avgloss')
        ax2.set_title('val_Avgloss')
        ax3.set_title('train_Avgacc')
        ax4.set_title('val_Avgacc')

        '''画图'''
        ax1.plot(tal)
        ax2.plot(val)
        ax3.plot(taa)
        ax4.plot(vaa)
        fig.tight_layout()  # compact layout

        save_path = './result_img/'
        # NOTE(review): filename says "LeNet" while the title says "ResNet" —
        # looks like a leftover from an earlier model; confirm before relying on it.
        plt.savefig(path.join(save_path, 'result_img_LeNet_' + self.vision + '.png'))  # save the figure


class Verifier:
    """
    Verifier: loads trained ResNet50_NCL weights and either evaluates them on
    the verification image set or classifies a single wav file end to end.
    """

    def __init__(self, model_root, verify_dataset):
        """
        :param model_root: path to the saved state_dict (.pth file)
        :param verify_dataset: root folder of the verification images
        """
        self.model_root = model_root
        self.data_transform = transforms.Compose([transforms.ToTensor()])
        self.device = "cuda" if cuda.is_available() else 'cpu'
        print(f"running by {self.device} !")
        self.model = ResNet50_NCL(5).to(self.device)  # same architecture as used for training
        self.model.load_state_dict(_load(self.model_root))  # restore the trained weights
        self.veri_dataset = DataSet(case_path=verify_dataset)
        self.batch_size = 32
        self.veri_dataloader = DataLoader(self.veri_dataset, self.batch_size, shuffle=True)

    def start_verifying(self):
        """
        Run the model over the verification dataloader and report the
        batch-average accuracy in the progress bar.
        """
        model = self.model
        device = self.device
        dataloader = self.veri_dataloader
        model.eval()  # evaluation mode: parameters are not updated in this phase
        cum_acc = 0.0  # cumulative accuracy over processed batches
        _n = 0  # number of processed batches
        with no_grad():
            veri_dataloader = tqdm(dataloader, desc=f'[verification]')
            for _X, _y in veri_dataloader:
                _X, _y = _X.to(device), _y.to(device)
                # BUG FIX: ResNet50_NCL.forward returns a 5-tuple of head
                # outputs; the original passed that tuple straight to torch.max,
                # which raises TypeError. Average the heads into the ensemble
                # prediction, as Trainer.test does.
                o1, o2, o3, o4, o5 = model(_X)
                output = (o1 + o2 + o3 + o4 + o5) / 5
                max_p, pred_label = _max(output, dim=1)  # per-row max and predicted class index
                cur_acc = _sum(_y.eq(pred_label)) / output.shape[0]
                cum_acc += cur_acc.item()
                _n = _n + 1
                # (label fixed: the value shown is accuracy, not loss)
                veri_dataloader.set_postfix({'批平均验证准确率': cum_acc / _n})

    def start_running(self, wav_path):
        """
        Classify one wav file: slice it into 4096-sample windows, turn each
        window into a 64x64 three-channel image (mirroring transform_to_gray),
        run the ensemble, and majority-vote the per-window predictions.
        :param wav_path: path to the wav file to classify
        """
        seq, rate = load_(wav_path)
        mini_seq = seq[:len(seq) // 4096 * 4096]  # truncate to whole 4096-sample windows
        pics = mini_seq.reshape(-1, 64 * 64)  # one row per window
        normalize_tool = MinMaxScaler(feature_range=(0, 255))  # min-max scale to (0, 255)
        normal_pics = normalize_tool.fit_transform(pics)

        ft = self.veri_dataset.fault_type
        # BUG FIX: the original fed one tall single-channel image (a rank-3
        # tensor) into a conv net expecting an NCHW 3-channel batch, and then
        # treated the 5-tuple output as a tensor. Build a batch of 64x64 RGB
        # images — one per window — exactly like the training pipeline.
        imgs = []
        for window in normal_pics:
            gray = fromarray(window.reshape(64, 64)).convert('L')
            imgs.append(self.data_transform(merge('RGB', (gray, gray, gray))))
        batch = stack(imgs).to(self.device)

        self.model.eval()
        with no_grad():
            o1, o2, o3, o4, o5 = self.model(batch)  # five NCL head outputs
            output = (o1 + o2 + o3 + o4 + o5) / 5  # ensemble prediction per window
        max_p, pred_label = _max(output, dim=1)  # predicted class per window
        counts = bincount(pred_label.cpu().detach().numpy())  # votes per class
        count = argmax(counts)  # majority class index
        label = ft[count]
        print(f"故障类型：{label}")


if __name__ == '__main__':
    # Build the fixed output directory tree (result_img/, save_model/, wav2gary/).
    buildcase()
    # Convert time-domain signals to grayscale images.
    dataset_root = r'wav_data_V3/'
    for mode in ['train', 'test', 'veri']:
        print(f"正在生成{mode}集...")
        for t in listdir(path.join(dataset_root, mode)):
            for file in listdir(path.join(dataset_root, mode, t)):
                print(f"正在生成{t}类型:{file}")
                filepath = path.join(dataset_root, mode, t, file)
                save_case = path.join('gray', mode, t)
                # BUG FIX: transform_to_gray lists and writes into save_case,
                # but the 'gray/...' tree was never created (buildcase only
                # makes 'wav2gary/...'), so the first call crashed.
                makedirs(save_case, exist_ok=True)
                transform_to_gray(filepath, 4096, 4096, save_case)
    # Build the trainer.
    _trainer = Trainer('V1', 'gray/train', 'gray/test')
    # Run the training loop.
    _trainer.start_running(1)
    # Build the verifier.
    # BUG FIX: Trainer saves weights as 'save_model/ResNet_<vision>.pth';
    # the original loaded 'save_model/LeNet_V1.pth', a file that never exists.
    _verifier = Verifier('save_model/ResNet_V1.pth', 'gray/veri')
    # Run verification over the veri image set.
    _verifier.start_verifying()
    # Classify one raw wav file end to end.
    _verifier.start_running('wav_data_V3/veri/zgz/54.wav')
