# -*- coding: utf-8 -*-
# May Saint Diana bless your coding!
# Created Time : 2021/12/4 14:29
# Author:Zhou Chenghao
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
from torch.autograd import Variable
import os

import ANC_tools

torch.manual_seed(1)  # reproducible

# Load the experiment data.
# path = "次级通道建模数据.csv"  # secondary-path modelling data: input is Gaussian white noise played through the secondary loudspeaker (excites all bands), output is the signal recorded at the error sensor
path = "非线性3.csv"  # a reference signal and its corresponding output after a nonlinear process
data = np.loadtxt(path, delimiter=",")
signal_x = data[:, 0]  # reference (input) signal
signal_y = data[:, 1]  # measured (output) signal

# Hyper Parameters
TIME_STEP = 48  # rnn time step: number of samples fed to the RNN per training chunk
INPUT_SIZE = 1  # rnn input size
LR = 0.01  # learning rate
secondaryPathModel = torch.tensor([0.08, -0.1, 0.2, 0.5, 1, 0, 0, 0])  # FIR taps of the secondary path; the first tap is the oldest sample, y(n-7)
secondaryPathModel = secondaryPathModel[np.newaxis, :, np.newaxis]  # reshape to (1, 8, 1) so it can be matmul'd with (batch, time_step, 8) predictions


class RNN(nn.Module):
    """Single-layer vanilla RNN mapping a scalar input sequence to an
    8-dimensional output per time step (one value per secondary-path FIR tap).
    """

    def __init__(self):
        super(RNN, self).__init__()

        self.rnn = nn.RNN(  # a plain RNN is sufficient for this task
            input_size=1,
            hidden_size=32,  # rnn hidden unit count
            num_layers=1,  # number of stacked RNN layers
            batch_first=True,  # input & output use batch as the first dim, e.g. (batch, time_step, input_size)
        )
        # Output layer: 8 values per time step, matching the number of taps
        # in the secondary-path FIR model.
        self.out = nn.Linear(32, 8)

    def forward(self, x, h_state):
        """Run the RNN over one chunk of the signal.

        :param x: input tensor, shape (batch, time_step, input_size)
        :param h_state: hidden state, shape (n_layers, batch, hidden_size),
            or None for the initial state; it is carried across calls so the
            signal is processed as one continuous stream.
        :return: (outputs of shape (batch, time_step, 8), new hidden state)
        """
        r_out, h_state = self.rnn(x, h_state)  # h_state is also an input to the RNN
        # nn.Linear broadcasts over all leading dimensions, so applying it to
        # the full (batch, time_step, hidden) tensor is exactly equivalent to
        # the original per-time-step Python loop, without the loop overhead.
        return self.out(r_out), h_state


class My_loss(nn.Module):
    """Custom ANC loss.

    The network output is filtered through the secondary-path model to obtain
    the anti-noise signal at the cancellation point; the loss penalises the
    residual between that signal and the target noise:

        loss = (mean |pred @ S + target|) ** 2
    """

    def __init__(self, secondary_path=None):
        """
        :param secondary_path: FIR taps of the secondary path as a tensor
            shaped for ``torch.matmul`` with the prediction, e.g. (1, 8, 1).
            Defaults to the module-level ``secondaryPathModel`` for backward
            compatibility.
        """
        super().__init__()
        self.secondary_path = secondary_path

    def forward(self, pred, target):
        """Compute the residual-noise loss.

        :param pred: network output, shape (batch, time_step, taps)
        :param target: noise measured at the error sensor, broadcastable with
            the filtered prediction, e.g. (batch, time_step, 1)
        :return: scalar loss tensor
        """
        sp = self.secondary_path if self.secondary_path is not None else secondaryPathModel
        filtered_y = torch.matmul(pred, sp)
        # NOTE(review): this is (mean|e|)^2, not mean(e^2) as in classic
        # MSE/FxLMS formulations — presumably intentional; confirm against
        # the algorithm being reproduced.
        return torch.pow(torch.mean(torch.abs(filtered_y + target)), 2)


rnn = RNN()
# If a trained parameter file exists, load it directly.
if os.path.exists('rnn_anc_net_params.pkl'):
    rnn.load_state_dict(torch.load('rnn_anc_net_params.pkl'))  # restore the net's parameters

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)  # optimize all rnn parameters
# loss_func = nn.MSELoss()
loss_func = My_loss()
h_state = None  # None selects the initial (zero) hidden state

train_x = signal_x[0:48000]  # how many samples to train on
train_y = signal_y[0:48000]
T = len(train_x)

error = np.zeros(T)  # residual noise at the error sensor, per sample

print("开始训练！")
for i in range(T // TIME_STEP):  # one chunk of TIME_STEP samples per iteration
    start = i * TIME_STEP
    end = (i + 1) * TIME_STEP
    x_np = train_x[start:end]  # TIME_STEP samples form one input chunk
    y_np = train_y[start:end]
    # shape (batch, time_step, input_size); torch.autograd.Variable is
    # deprecated — plain tensors participate in autograd since PyTorch 0.4.
    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis]).float()
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis]).float()
    prediction, h_state = rnn(x, h_state)  # per-step predictions plus the final hidden state
    # !! The next step is essential !!
    # Detach the hidden state so gradients do not flow across chunk
    # boundaries (truncated BPTT); otherwise backward() raises on the
    # second iteration.
    h_state = h_state.detach()
    loss = loss_func(prediction, y)
    optimizer.zero_grad()  # clear gradients for this training step
    loss.backward()  # backpropagation, compute gradients
    # optimizer.step()  # apply gradients; keep this commented out to validate a loaded net without updating it
    # Residual = predicted anti-noise filtered through the secondary path,
    # summed with the primary noise.
    ys = torch.matmul(prediction, secondaryPathModel).detach().numpy().flatten()
    error[start:end] = ys + y_np.flatten()

# torch.save(rnn.state_dict(), 'rnn_anc_net_params.pkl')  # save only the network parameters (fast, small)

print("训练完成！")

plt.figure()
plt.subplot(211)
plt.plot(train_y)
plt.plot(error - train_y)
plt.plot(error)
plt.legend(["primary noise", "secondary noise", "residual noise"])
plt.subplot(212)
plt.plot(ANC_tools.ANR(train_y, error))
plt.show()
