#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description:       :
@Date     :2022/05/07 23:16:06
@Author      :Cosecant
@version      :1.0
                    _ooOoo_                     
                   o8888888o                    
                  88   .   88                    
                   (| -_- |)                    
                   O\  =  /O                    
                ____/`---'\____                 
              .'   \|     |/   `.               
             /   \|||  :  |||/   \              
            /  _||||| -:- |||||_  \             
            |   | \ \  -  /// |   |             
            | \_|  ''\---/''  |_/ |             
            \  .-\__  `-`  ___/-. /             
          ___`. .'  /--.--\  `. . __            
       .'' '<  `.___\_<|>_/___.'  >' ''.         
      | | :  `- \`.;`\ _ /`;.`/ - ` : | |       
      \  \ `-.   \_ __\ /__ _/   .-` /  /       
 ======`-.____`-.___\_____/___.-`____.-'======   
                    `=---='                     
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^   
        佛祖保佑        永无BUG                  
'''
from doctest import OutputChecker
from torch.utils.tensorboard import SummaryWriter
import datetime
import torch
import torch.nn as nn
import torch.utils.data as Data
import scipy.io as sio
import utils.data_preparation as mydp
import utils.func as myfunc

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class MyNet(nn.Module):
    """LSTM regressor: maps a (batch, seq_len, input_size) sequence to one
    output vector per sequence.

    A stacked LSTM consumes the whole sequence; the hidden output of the
    last time step is projected to ``output_chan`` values by a linear layer.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_chan):
        """
        Args:
            input_size: number of expected features in the input x.
            hidden_size: number of features in the hidden state h.
            num_layers: number of stacked recurrent LSTM layers.
            output_chan: dimensionality of the regression output.
        """
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> input/output tensors are (batch, seq, feature).
        self.lstm = nn.LSTM(input_size,
                            hidden_size,
                            num_layers,
                            batch_first=True)
        # Map the hidden-state space to the output space.
        self.fc = nn.Linear(hidden_size, output_chan)

    def forward(self, x, h_state):
        """Run the sequence through the LSTM and project the final step.

        Args:
            x: input of shape (batch, seq_len, input_size).
            h_state: (h_0, c_0) pair, each (num_layers, batch, hidden_size).

        Returns:
            (output, h_state_new): ``output`` has shape (batch, output_chan);
            ``h_state_new`` is the updated (h_n, c_n) pair, returned so the
            caller can carry it over (e.g. for truncated BPTT).
        """
        # LSTM output has shape (batch, seq_len, hidden_size).
        output, h_state_new = self.lstm(x, h_state)
        # Keep only the hidden output of the last time step.
        output = output[:, -1, :]
        output = self.fc(output)

        return output, h_state_new


if __name__ == '__main__':
    writer = SummaryWriter(
        'runs/' + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S/'))
    # Config: set True to resume from a previously saved checkpoint.
    pre_train = False

    # Preprocessed NinaPro DB8 dataset.
    # ./EMG_data/train/ holds all *A1 & *A2 data; ./EMG_data/test/ all *A3.
    TRAIN_FILE_PATH = './EMG_data/thumb_scaled/normalization/train/'
    TEST_FILE_PATH = './EMG_data/thumb_scaled/normalization/test/'
    onnx_path = "./model_backup/"

    # Surface EMG generally leads limb motion by at least 30 ms; at 2 kHz
    # sampling that is ~60 samples of dead zone.
    DEAD_ZONE = 10

    # Model hyper-parameters; see
    # https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html
    input_size = 16  # number of expected features in the input x
    hidden_size = 64  # number of features in the hidden state h
    num_layers = 3  # number of stacked recurrent layers
    droprate = 0.2
    output_chan = 3
    data_step = 10
    sequence_length = 1000
    batch_size = 100
    # Because of batching, the hidden state of step t-1 reaches step
    # t+batch_size rather than t, i.e. the carried state lags by one batch.
    # With batch_size == sequence_length the returned hidden state lines up
    # exactly with the next batch.

    num_epochs = 20
    learning_rate = 0.0002

    model = MyNet(input_size, hidden_size, num_layers, output_chan).to(device)
    # MSE loss: every output channel is an independent regression target.
    crit = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Zero-initialise the hidden and cell states. nn.LSTM expects the
    # carried state as a *tuple* (h_0, c_0), not a list.
    h0 = torch.zeros(num_layers, batch_size, hidden_size).to(device)
    c0 = torch.zeros(num_layers, batch_size, hidden_size).to(device)
    h_state = (h0, c0)

    if pre_train:
        # NOTE(review): load_checkpoint is project code; presumably it
        # restores the optimizer state and returns the model — confirm.
        model = myfunc.load_checkpoint(torch.load('model.pth'), optimizer)

    # Train.
    global_train_step = 0
    # Pre-computed step counts, used only for progress display.
    train_total_step = int(245793 * 2 / batch_size / data_step)
    test_total_step = int(5289783 / batch_size / 10 / 2)
    for epoch in range(num_epochs):
        print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        j = 0
        for f in mydp.findAllMat(TRAIN_FILE_PATH):  # next dataset file
            temp_dataset = mydp.MyDataSet(TRAIN_FILE_PATH + f, sequence_length,
                                          0, data_step)
            train_loader = Data.DataLoader(dataset=temp_dataset,
                                           batch_size=batch_size,
                                           shuffle=False,
                                           drop_last=True,
                                           num_workers=0)
            model.train()
            for i, (mdata, labels) in enumerate(train_loader):
                # Input tensor shape: (batch, sequence_length, input_size).
                mdata = mdata.to(device)
                labels = labels.to(device)
                # Forward pass.
                outputs, h_state = model(mdata, h_state)
                loss = crit(outputs, labels)
                # Truncated backpropagation through time (TBPTT): detach the
                # carried state so gradients stop at the batch boundary.
                h_state = (h_state[0].detach(), h_state[1].detach())
                # Backward pass.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # Progress / tensorboard logging. Summing over channels gives
                # one curve that reflects overall accuracy.
                outputs_sum = outputs.sum(1)
                labels_sum = labels.sum(1)
                for k in range(labels.shape[0]):
                    writer.add_scalars('contrast', {
                        'output': outputs_sum[k],
                        'glove': labels_sum[k]
                    }, global_train_step)  # predicted vs. measured
                    global_train_step += 1
                writer.add_scalar('train_loss',
                                  loss.item(),
                                  global_step=global_train_step)
                writer.add_scalar('Epoch',
                                  epoch + 1,
                                  global_step=global_train_step)
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                    epoch + 1, num_epochs, j + 1, train_total_step,
                    loss.item()))
                j += 1
        # Evaluate after each epoch.
        with torch.no_grad():
            # Fresh zero state for the evaluation pass.
            h0 = torch.zeros(num_layers, batch_size, hidden_size).to(device)
            c0 = torch.zeros(num_layers, batch_size, hidden_size).to(device)
            h_state = (h0, c0)
            global_test_step = 0
            for f in mydp.findAllMat(TEST_FILE_PATH):  # next dataset file
                temp_dataset2 = mydp.MyDataSet(TEST_FILE_PATH + f,
                                               sequence_length, 0, data_step)
                test_loader = Data.DataLoader(dataset=temp_dataset2,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              drop_last=True)
                model.eval()
                for i, (mdata, labels) in enumerate(test_loader):
                    # Input tensor shape: (batch, sequence_length, input_size).
                    mdata = mdata.to(device)
                    labels = labels.to(device)
                    # Forward pass only; gradients are disabled.
                    outputs, h_state = model(mdata, h_state)
                    loss = crit(outputs, labels)
                    outputs_sum = outputs.sum(1)
                    labels_sum = labels.sum(1)
                    for k in range(labels.shape[0]):
                        writer.add_scalars(
                            'test_contrast', {
                                "output": outputs_sum[k],
                                "raw": labels_sum[k]
                            }, global_test_step)  # predicted vs. measured
                        global_test_step += 1
                    writer.add_scalar('test_loss',
                                      loss.item(),
                                      global_step=global_test_step)
        # Save the whole model after every epoch.
        torch.save(model, 'model' + str(epoch + 1) + '.pth')

    writer.close()
