# -*- coding:utf-8 -*-#
# @Time:2023/7/11 20:01
# @Author:Adong
# @Software:PyCharm

import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import time
import librosa
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import scipy.io.wavfile as _wavefile

def lstm_dataset(data, days_for_train=5):
    """Build a sliding-window supervised dataset from a 1-D sequence.

    Each input sample is a window of ``days_for_train`` consecutive values;
    its target is the single value immediately following that window
    (i.e. predict day N+1 from the previous N days).

    Args:
        data: 1-D sequence (list or numpy array) of values.
        days_for_train: window length used as the model input.

    Returns:
        Tuple ``(dataset_x, dataset_y)`` of numpy arrays holding
        ``len(data) - days_for_train`` samples: ``dataset_x`` has shape
        ``(samples, days_for_train)`` and ``dataset_y`` has shape ``(samples,)``.
        (The original docstring claimed ``d - days_for_train + 1`` pairs,
        but the loop produces ``d - days_for_train``.)
    """
    n_samples = len(data) - days_for_train
    dataset_x = [data[i:i + days_for_train] for i in range(n_samples)]
    dataset_y = [data[i + days_for_train] for i in range(n_samples)]
    return np.array(dataset_x), np.array(dataset_y)

def _user(filepath, modelpath):
    """Run inference with a trained LSTM on a wav file and write the prediction.

    Loads the audio at ``filepath``, builds sliding-window inputs of length 10,
    feeds them through the network restored from ``modelpath``, prints the MSE
    between the prediction and the original signal, and writes the predicted
    signal to ``new1.wav``.
    """
    device = 'cpu'
    seq, rate = librosa.load(filepath)
    dataset_x = lstm_dataset(seq, 10)[0]
    dataset_x = torch.Tensor(dataset_x).to(device)
    model = lstm_net(10, 8, output_size=1, num_layers=2).to(device)
    # map_location keeps loading working even if the checkpoint was saved on GPU
    model.load_state_dict(torch.load(modelpath, map_location=device))
    model.eval()  # inference mode

    with torch.no_grad():  # no gradients needed for inference
        pred = model(dataset_x).numpy()
    pred = np.concatenate((np.zeros(10), pred))  # left-pad with zeros so lengths match
    assert len(pred) == len(seq)
    loss_fn = nn.MSELoss()
    loss = loss_fn(torch.Tensor(pred), torch.Tensor(seq))
    print(loss)
    _wavefile.write("new1.wav", int(rate), pred.astype(np.float32))

class lstm_net(nn.Module):
    """Small LSTM regressor: a recurrent stack followed by a linear read-out."""

    def __init__(self, input_size, hidden_size, output_size=1, num_layers=2):
        super().__init__()
        # recurrent stack; forward() keeps only the output sequence and
        # discards the final (h_t, c_t) state pair
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)
        # projects each hidden state down to the requested output width
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, _x):
        features = _x.to(torch.float32)  # LSTM parameters are float32
        hidden_seq, _state = self.lstm(features)
        projected = self.fc(hidden_seq)
        # flatten to a 1-D prediction vector regardless of input layout
        return projected.reshape(-1)





class trainer:
    """Trains an lstm_net on a 1-D signal and plots loss / predictions."""

    def __init__(self, data, days_for_train):
        """Prepare the sliding-window dataset, model, optimizer and loss.

        Args:
            data: 1-D numpy array (normalized signal) to fit.
            days_for_train: window length fed to the LSTM per prediction.
        """
        t0 = time.time()  # NOTE(review): kept from original; never read afterwards
        self.day_for_train = days_for_train
        self.device = 'cpu'
        self.data = data  # keep the raw series so test() does not rely on a global
        self.dataset_x, self.dataset_y = lstm_dataset(data=data, days_for_train=days_for_train)
        self.model = lstm_net(days_for_train, 8, output_size=1, num_layers=2).to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-2, betas=(0.9, 0.999), eps=1e-08,
                                          weight_decay=0)
        self.loss_function = nn.MSELoss()

    def train(self):
        """Run one full-batch optimization step on the first 70% of the data.

        Returns:
            The scalar training loss for this step.
        """
        model = self.model
        device = self.device
        optimizer = self.optimizer
        self.train_size = int(len(self.dataset_x) * 0.7)
        # Tensor.to() returns a new tensor: the result must be re-assigned
        # (the original discarded it, silently leaving the tensors unmoved).
        train_x = torch.from_numpy(self.dataset_x[:self.train_size]).to(device)
        train_y = torch.from_numpy(self.dataset_y[:self.train_size]).to(device)

        model.train()  # training mode

        out = model(train_x)
        loss = self.loss_function(out, train_y)
        optimizer.zero_grad()  # clear stale gradients before accumulating new ones
        loss.backward()
        optimizer.step()
        return loss.item()

    def test(self):
        """Predict over the whole dataset and save a prediction-vs-real plot.

        Must be called after train(): it uses self.train_size to draw the
        train/test split marker.
        """
        model = self.model
        model.eval()

        test_x = torch.from_numpy(self.dataset_x)

        with torch.no_grad():  # inference only; no gradient bookkeeping
            pred_test = model(test_x)
        pred_test = pred_test.view(-1).numpy()
        # left-pad with zeros so the prediction aligns with the raw series
        pred_test = np.concatenate((np.zeros(self.day_for_train), pred_test))
        assert len(pred_test) == len(self.data)

        plt.plot(pred_test, 'r', label='prediction')
        plt.plot(self.data, 'b', label='real')
        plt.plot((self.train_size, self.train_size), (0, 1), 'g--')  # train/test split marker
        plt.legend(loc='best')
        plt.savefig('result.png', format='png', dpi=200)
        plt.close()

    def start_running(self, epochs):
        """Train for *epochs* steps, log/plot the loss, save weights, then plot predictions."""
        import os  # local import: keeps the file-level import block untouched

        train_loss = []
        for i in range(epochs):
            loss = self.train()
            train_loss.append(loss)
            # persist the per-epoch loss and echo it to the terminal
            with open('log.txt', 'a+') as f:
                f.write('{} - {}\n'.format(i + 1, loss))
            print('Epoch: {}, Loss:{:.5f}'.format(i + 1, loss))

        # loss curve
        plt.figure()
        plt.plot(train_loss, 'b', label='loss')
        plt.title("Train_Loss_Curve")
        plt.ylabel('train_loss')
        plt.xlabel('epoch_num')
        plt.savefig('loss.png', format='png', dpi=200)
        plt.close()

        os.makedirs('save_model', exist_ok=True)  # avoid FileNotFoundError on first run
        torch.save(self.model.state_dict(), 'save_model/LSTM_jbfd_3sr.pth')
        self.test()





if __name__ == '__main__':
    # Raw string: the Windows path contains backslash sequences (\d, \w, \j)
    # that are invalid escapes in a normal string literal. Hoisted because the
    # same path was previously duplicated for training and inference.
    wav_path = r'E:\@MY_code\Voice_Recognize\data\wav_data_V4\jbfd\局部放电_byq_jbfd_01.wav'
    model_path = 'save_model/LSTM_jbfd_3sr.pth'

    data_close, sr = librosa.load(wav_path)
    data_close = data_close.astype('float32')[2 * sr:3 * sr]  # keep one second of audio
    plt.plot(data_close)
    plt.savefig('data.png', format='png', dpi=200)
    plt.close()

    # min-max normalize to [0, 1]; guard against a constant segment, which
    # would otherwise divide by zero
    max_value = np.max(data_close)
    min_value = np.min(data_close)
    if max_value == min_value:
        raise ValueError('audio segment is constant; cannot min-max normalize')
    data_close = (data_close - min_value) / (max_value - min_value)

    zqd = trainer(data_close, 10)
    zqd.start_running(200)

    _user(wav_path, model_path)
