# -*- coding:utf-8 -*-#
"""
Filename:project_lenet.py
Author:ZhangQidong
Contact:835439833@qq.com
"""

from os import listdir, mkdir, path, sep

import matplotlib.pyplot as plt
import numpy as np
from librosa import load as load_
from numpy import linspace
from PIL.Image import fromarray
from PIL.Image import open as open_
from sklearn.preprocessing import MinMaxScaler
from torch import cuda
from torch import load as _load
from torch import max as max_
from torch import nn, no_grad
from torch import save as save_
from torch import sum as sum_
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from tqdm import tqdm


def buildcase():
    """
    Create the directory tree used by the rest of the pipeline:
    ``result_img/``, ``save_model/`` and ``gray/{train,test,veri}/<fault_type>/``.

    Each directory is created only if it does not already exist, so a
    partially-built tree is completed on re-run (the old code skipped all
    subdirectories whenever ``gray/`` itself existed).

    :return: None
    """
    fault_type = ['normal', 'zlpc', 'zgz', 'zjsd', 'jbfd']

    def _make(dirpath):
        # Create a single directory if absent and announce it.
        # (Also fixes the old "gary" typo in the gray/ creation message.)
        if not path.exists(dirpath):
            mkdir(dirpath)
            print(f"{dirpath}文件夹已创建...")

    _make('result_img')
    _make('save_model')
    _make('gray')
    _make('gray/train/')
    _make('gray/test/')
    _make('gray/veri')
    for ft in fault_type:
        _make('gray/train/' + ft)
        _make('gray/test/' + ft)
        _make('gray/veri/' + ft)


def transform_to_gray(wav_path, win_length, stride, savecase):
    """
    Slice the time-domain signal of an audio file with a sliding window and
    save every slice as a square grayscale image under ``savecase``.

    :param wav_path: path of the audio file (loaded with librosa)
    :param win_length: window length in samples; must be a perfect square
                       (e.g. 4096 -> 64x64 images)
    :param stride: hop size in samples between consecutive windows
    :param savecase: directory the images are written to
    :return: None
    :raises ValueError: if ``win_length`` is not a perfect square
    """
    side = int(win_length ** 0.5)
    if side * side != win_length:
        raise ValueError("win_length must be a perfect square")
    seq, rate = load_(wav_path)
    # BUG FIX: the old code used linspace(0, len(seq) - win_length, stride),
    # which interprets `stride` as the NUMBER of windows rather than the hop
    # size (the docstring says it is the window shift step), producing
    # heavily overlapping or missing slices. arange steps by `stride`.
    starts = np.arange(0, len(seq) - win_length + 1, stride)
    # Hoisted out of the loop: per-column min-max scaling into [0, 255].
    normalize_tool = MinMaxScaler(feature_range=(0, 255))
    for start in starts:
        # Count images already in the folder so new files never overwrite old ones.
        exist_img_num = len(listdir(savecase))
        piece = seq[int(start):int(start) + win_length]
        pic = piece.reshape(side, side)
        normpiece = normalize_tool.fit_transform(pic)

        img = fromarray(normpiece).convert('L')  # array -> 8-bit grayscale image
        # The parent directory name of the wav file encodes the fault type.
        fault_type = path.normpath(wav_path).split(sep)[-2]
        savepath = path.join(savecase, fault_type + '_' + str(exist_img_num + 1) + '.png')
        img.save(savepath)


class DataSet(Dataset):
    """
    Torch dataset over a directory tree ``case_path/<fault_type>/<img>.png``.

    Each image is opened with PIL, converted to a tensor, and labelled with
    the index of its fault-type directory name in ``self.fault_type``.
    """

    def __init__(self, case_path):
        """
        :param case_path: root directory containing one subfolder per fault type
        """
        # BUG FIX: os.listdir order is OS-dependent, so two DataSet instances
        # (e.g. the train set and the test set) could encode the same fault
        # type with different indices. Sorting makes label encoding
        # deterministic and consistent across instances.
        self.fault_type = sorted(listdir(case_path))
        all_img_path = []
        for fault in self.fault_type:
            fault_dir = path.join(case_path, fault)  # per-fault-type image folder
            for img_name in listdir(fault_dir):
                all_img_path.append(path.normpath(path.join(fault_dir, img_name)))
        self.all_img_path = all_img_path
        self.data_transform = transforms.Compose([transforms.ToTensor()])  # PIL image -> float tensor

    def __getitem__(self, idx):
        """
        :param idx: sample index
        :return: (image tensor, integer fault-type label)
        """
        img_path = self.all_img_path[idx]
        img = open_(img_path)
        img = self.data_transform(img)
        # The label is the parent directory name of the image file.
        label = img_path.split(sep)[-2]
        index = self.fault_type.index(label)
        return img, index

    def __len__(self):
        return len(self.all_img_path)


class Net(nn.Module):
    """
    CNN for 1x64x64 grayscale inputs: four conv+pool stages
    (spatial size 64 -> 32 -> 16 -> 8 -> 4) followed by two hidden
    fully-connected layers and a 5-class output layer.
    """

    def __init__(self):
        super().__init__()
        self.re_lu = nn.ReLU()
        # Convolution stack: channels 1 -> 32 -> 64 -> 128 -> 256,
        # each followed by a 2x2 max-pool that halves the spatial size.
        self.l_1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2)
        self.l_2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l_3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
        self.l_4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l_5 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
        self.l_6 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l_7 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1)
        self.l_8 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.f_1 = nn.Linear(in_features=256 * 4 * 4, out_features=2560)
        self.f_2 = nn.Linear(in_features=2560, out_features=768)
        self.output = nn.Linear(in_features=768, out_features=5)

    def forward(self, input_):
        """
        Forward pass.

        :param input_: batch of shape (N, 1, 64, 64)
        :return: raw class logits of shape (N, 5)
        """
        input_ = self.re_lu(self.l_1(input_))  # conv 1
        input_ = self.l_2(input_)  # pool 1
        input_ = self.re_lu(self.l_3(input_))  # conv 2
        input_ = self.l_4(input_)  # pool 2
        input_ = self.re_lu(self.l_5(input_))  # conv 3
        input_ = self.l_6(input_)  # pool 3
        input_ = self.re_lu(self.l_7(input_))  # conv 4
        input_ = self.l_8(input_)  # pool 4
        input_ = input_.view(-1, 256 * 4 * 4)  # flatten to (N, 4096)
        # BUG FIX: the hidden fully-connected layers had no activation, so
        # f_1 -> f_2 -> output collapsed into a single affine map, wasting
        # the extra layers' capacity. ReLU restores the non-linearity.
        input_ = self.re_lu(self.f_1(input_))
        input_ = self.re_lu(self.f_2(input_))
        output_ = self.output(input_)
        return output_


class Trainer:
    """
    Training driver: owns the model, optimizer, LR scheduler and data
    loaders, and provides per-epoch train/test loops, model checkpointing
    and result plotting.
    """

    def __init__(self, vision, train_dataset, test_dataset):
        """
        :param vision: version tag used in saved-model / plot filenames
        :param train_dataset: directory of the training image set
        :param test_dataset: directory of the test image set
        """
        self.vision = vision
        self.device = "cuda" if cuda.is_available() else 'cpu'
        print(f"training by {self.device} !")
        self.model = Net().to(self.device)
        self.optimizer = SGD(self.model.parameters(), lr=1e-3, momentum=0.9)
        self.lr_scheduler = StepLR(optimizer=self.optimizer, step_size=10, gamma=0.1)
        # Hoisted: one loss instance reused by train() and test() instead of
        # constructing nn.CrossEntropyLoss() for every batch.
        self.criterion = nn.CrossEntropyLoss()
        self.train_dataset = DataSet(case_path=train_dataset)
        self.test_dataset = DataSet(case_path=test_dataset)
        self.batch_size = 32
        self.train_dataloader = DataLoader(self.train_dataset, self.batch_size, shuffle=True)
        self.test_dataloader = DataLoader(self.test_dataset, self.batch_size, shuffle=True)
        self.epoch = None  # current 1-based epoch number, set by start_running()

    def train(self):
        """
        Run one training epoch over the training loader.

        :return: (average accuracy, average loss) over the epoch's batches
        """
        model = self.model
        device = self.device
        optimizer = self.optimizer
        model.train()  # enable training mode (dropout/batch-norm behavior)
        cum_loss = 0.0  # cumulative loss over processed batches
        cum_acc = 0.0  # cumulative accuracy over processed batches
        n_batches = 0  # number of batches processed so far
        train_dataloader = tqdm(self.train_dataloader, desc=f'[train]epoch:{self.epoch}')
        for _x, _y in train_dataloader:
            _x = _x.to(device)
            _y = _y.to(device)

            output = model(_x)  # class logits
            cur_loss = self.criterion(output, _y)  # cross-entropy loss
            max_p, _label = max_(output, dim=1)  # predicted class per sample
            cur_acc = sum_(_y.eq(_label)) / output.shape[0]  # batch accuracy

            optimizer.zero_grad()
            cur_loss.backward()
            optimizer.step()

            cum_loss += cur_loss.item()
            cum_acc += cur_acc.item()
            n_batches += 1
            train_dataloader.set_postfix({'批平均训练误差': cum_loss / n_batches, '批平均训练准确率': cum_acc / n_batches})
        return cum_acc / n_batches, cum_loss / n_batches

    def test(self):
        """
        Evaluate the model on the test loader (no gradient updates).

        :return: (average accuracy, average loss) over the test batches
        """
        model = self.model
        device = self.device

        model.eval()  # evaluation mode: fixed statistics, no dropout
        cum_loss = 0.0
        cum_acc = 0.0
        n_batches = 0

        with no_grad():  # inference only: skip gradient bookkeeping
            test_dataloader = tqdm(self.test_dataloader, desc=f'[test]epoch:{self.epoch}')
            for _x, _y in test_dataloader:
                _x, _y = _x.to(device), _y.to(device)

                output = model(_x)
                cur_loss = self.criterion(output, _y)

                max_p, _label = max_(output, dim=1)
                cur_acc = sum_(_y.eq(_label)) / output.shape[0]
                cum_loss += cur_loss.item()
                cum_acc += cur_acc.item()
                n_batches += 1
                test_dataloader.set_postfix({'批平均测试误差': cum_loss / n_batches, '批平均测试准确率': cum_acc / n_batches})
            return cum_acc / n_batches, cum_loss / n_batches

    def start_running(self, epoch):
        """
        Train for ``epoch`` epochs: run train/test, log metrics to
        'train diary.txt', step the LR scheduler, checkpoint on improvement,
        and plot the curves at the end.

        :param epoch: number of training epochs
        :return: None
        """
        tal = []  # train_average_loss per epoch
        val = []  # val_average_loss per epoch
        taa = []  # train_average_acc per epoch
        vaa = []  # val_average_acc per epoch

        best_acc = 0  # best test accuracy seen so far (was misnamed min_acc)
        for epo in range(epoch):
            self.epoch = epo + 1
            acc1, loss1 = self.train()
            acc2, loss2 = self.test()
            # BUG FIX: the StepLR scheduler was constructed in __init__ but
            # .step() was never called, so the LR decay never took effect.
            self.lr_scheduler.step()

            tal.append(loss1)
            taa.append(acc1)
            val.append(loss2)
            vaa.append(acc2)

            # Append this epoch's metrics to the training diary.
            # utf-8 is explicit because the diary contains Chinese text.
            with open('train diary.txt', 'a+', encoding='utf-8') as _f:
                _f.write(
                    f'epoch{epo + 1} - '
                    f'批平均训练误差：{loss1},'
                    f'批平均测试误差：{loss2},'
                    f'批平均训练准确率：{acc1},'
                    f'批平均测试准确率：{acc2}\n')

            if acc2 > best_acc:  # checkpoint whenever test accuracy improves
                best_acc = acc2
                save_(self.model.state_dict(), 'save_model/LeNet_' + self.vision + '.pth')
                print("Current best model has been saved!")
        print('training finished!')
        self.draw(tal, val, taa, vaa)

    def draw(self, train_average_loss, val_average_loss, train_average_acc, val_average_acc):
        """
        Plot the four per-epoch metric curves into one 2x2 figure and save it
        under result_img/.

        :param train_average_loss: per-epoch average training loss
        :param val_average_loss: per-epoch average test loss
        :param train_average_acc: per-epoch average training accuracy
        :param val_average_acc: per-epoch average test accuracy
        :return: None
        """
        fig = plt.figure()
        plt.suptitle('LeNet_' + self.vision)

        # Four subplots: losses on top, accuracies below.
        ax1 = plt.subplot(221)
        ax2 = plt.subplot(222)
        ax3 = plt.subplot(223)
        ax4 = plt.subplot(224)

        ax1.set_title('train_Avgloss')
        ax2.set_title('val_Avgloss')
        ax3.set_title('train_Avgacc')
        ax4.set_title('val_Avgacc')

        ax1.plot(train_average_loss)
        ax2.plot(val_average_loss)
        ax3.plot(train_average_acc)
        ax4.plot(val_average_acc)
        fig.tight_layout()
        plt.savefig('./result_img/result_img_LeNet_' + self.vision + '.png')


class Verifier:
    """
    Inference driver: loads a trained checkpoint and either measures accuracy
    on a labelled verification set or classifies a raw wav file.
    """

    def __init__(self, model_root, verify_dataset):
        """
        :param model_root: path to the saved state_dict (.pth)
        :param verify_dataset: directory of the labelled verification image set
        """
        self.model_root = model_root
        self.data_transform = transforms.Compose([transforms.ToTensor()])
        self.device = "cuda" if cuda.is_available() else 'cpu'
        print(f"running by {self.device} !")
        self.model = Net().to(self.device)
        self.model.load_state_dict(_load(self.model_root))  # restore trained weights
        self.veri_dataset = DataSet(case_path=verify_dataset)
        self.batch_size = 32
        self.veri_dataloader = DataLoader(self.veri_dataset, self.batch_size, shuffle=True)

    def start_verifying(self):
        """
        Measure classification accuracy over the labelled verification set.

        :return: None (accuracy is reported on the progress bar)
        """
        model = self.model
        device = self.device
        model.eval()  # evaluation mode: parameters are not updated
        cum_acc = 0.0  # cumulative batch accuracy
        n_batches = 0  # number of batches processed
        with no_grad():
            veri_dataloader = tqdm(self.veri_dataloader, desc=f'[verification]')
            for _X, _y in veri_dataloader:
                _X, _y = _X.to(device), _y.to(device)
                output = model(_X)
                max_p, pred_label = max_(output, dim=1)  # predicted class per sample
                cur_acc = sum_(_y.eq(pred_label)) / output.shape[0]
                cum_acc += cur_acc.item()
                n_batches += 1
                # BUG FIX: the postfix label said 误差 (loss) but the value
                # shown is the running average accuracy.
                veri_dataloader.set_postfix({'批平均验证准确率': cum_acc / n_batches})

    def start_running(self, wav_path):
        """
        Classify a whole wav file: slice it into 64x64 windows, predict each
        window, and print the majority-vote fault type.

        :param wav_path: audio file to classify
        :return: None
        """
        from torch import stack  # local import: `stack` is not among the file-level torch imports

        seq, rate = load_(wav_path)
        mini_seq = seq[:len(seq) // 4096 * 4096]  # drop the tail so the signal splits evenly
        # BUG FIX: the old code reshaped to (-1, 4096), built ONE giant
        # grayscale image and fed the model a single unbatched tensor; the
        # network expects a batch of 1x64x64 images, one per window.
        pics = mini_seq.reshape(-1, 64, 64)
        normalize_tool = MinMaxScaler(feature_range=(0, 255))  # per-column scaling to [0, 255]
        tensors = []
        for pic in pics:
            normpic = normalize_tool.fit_transform(pic)
            img = fromarray(normpic).convert('L')  # array -> 8-bit grayscale image
            tensors.append(self.data_transform(img))
        batch = stack(tensors).to(self.device)  # shape (N, 1, 64, 64)

        ft = self.veri_dataset.fault_type
        self.model.eval()
        with no_grad():  # inference only
            output = self.model(batch)
        max_p, pred_label = max_(output, dim=1)
        # Majority vote over the per-window predictions.
        counts = np.bincount(pred_label.cpu().detach().numpy())
        label = ft[np.argmax(counts)]
        print(f"故障类型：{label}")


if __name__ == '__main__':
    # Build the output directory tree.
    buildcase()

    # Convert the time-domain wav signals into grayscale images,
    # one output folder per (mode, fault type) pair.
    dataset_root = r'wav_data_V3/'
    for mode in ('train', 'test', 'veri'):
        print(f"正在生成{mode}集...")
        mode_dir = path.join(dataset_root, mode)
        for fault in listdir(mode_dir):
            fault_dir = path.join(mode_dir, fault)
            save_dir = path.join('gray', mode, fault)
            for wav_name in listdir(fault_dir):
                print(f"正在生成{fault}类型:{wav_name}")
                transform_to_gray(path.join(fault_dir, wav_name), 4096, 4096, save_dir)

    # Train the model and save the best checkpoint.
    _trainer = Trainer('V1', 'gray/train', 'gray/test')
    _trainer.start_running(1)

    # Verify the saved model on the held-out set.
    _verifier = Verifier('save_model/LeNet_V1.pth', 'gray/veri')
    _verifier.start_verifying()
    # Classify one raw wav file end-to-end.
    _verifier.start_running('wav_data_V3/veri/zgz/54.wav')
