#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description:       :
@Date     :2022/05/10 21:36:28
@Author      :Cosecant
@version      :1.0
                    _ooOoo_                     
                   o8888888o                    
                  88   .   88                    
                   (| -_- |)                    
                   O\  =  /O                    
                ____/`---'\____                 
              .'   \|     |/   `.               
             /   \|||  :  |||/   \              
            /  _||||| -:- |||||_  \             
            |   | \ \  -  /// |   |             
            | \_|  ''\---/''  |_/ |             
            \  .-\__  `-`  ___/-. /             
          ___`. .'  /--.--\  `. . __            
       .'' '<  `.___\_<|>_/___.'  >' ''.         
      | | :  `- \`.;`\ _ /`;.`/ - ` : | |       
      \  \ `-.   \_ __\ /__ _/   .-` /  /       
 ======`-.____`-.___\_____/___.-`____.-'======   
                    `=---='                     
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^   
        佛祖保佑        永无BUG                  
'''
from torch.utils.tensorboard import SummaryWriter
import datetime
import torch
import torch.nn as nn
import torch.utils.data as Data

import utils.data_preparation as mydp
import utils.func as myfunc

import LSTM_train

# One TensorBoard run per invocation, timestamped so successive runs never collide.
writer = SummaryWriter('runs/test/'+datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S/'))
# Prefer GPU when available; tensors and the model below are moved to this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MyNet(nn.Module):
    """LSTM regressor: maps a feature sequence to a fixed-size output vector.

    The input runs through a stacked LSTM; only the hidden output of the
    final time step is projected to the output space by a linear layer.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_chan):
        """
        Args:
            input_size: number of features per time step.
            hidden_size: width of the LSTM hidden state.
            num_layers: number of stacked LSTM layers.
            output_chan: dimensionality of the regression output.
        """
        super(MyNet, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> input tensors are (batch, seq_len, input_size).
        self.lstm = nn.LSTM(input_size,
                            hidden_size,
                            num_layers,
                            batch_first=True)
        # Fully-connected layer mapping the hidden state to the output space.
        self.fc = nn.Linear(hidden_size, output_chan)

    def forward(self, x, h_state):
        """Run one forward pass.

        Args:
            x: input of shape (batch, seq_len, input_size).
            h_state: initial (h_0, c_0) hidden/cell state pair.

        Returns:
            (output, h_state_new) where output is (batch, output_chan).
        """
        lstm_out, h_state_new = self.lstm(x, h_state)
        # Keep only the final time step's hidden output.
        last_step = lstm_out[:, -1, :]
        return self.fc(last_step), h_state_new

if __name__ == '__main__':

    # --- configuration ----------------------------------------------------
    pre_train = True  # load a previously trained checkpoint

    # Preprocessed NinaPro DB8 dataset
    # ./EMG_data/train/ includes all *A1 & *A2 data
    # ./EMG_data/test/ includes all *A3 data
    TRAIN_FILE_PATH = './EMG_data/thumb_scaled/normalization/train/'
    TEST_FILE_PATH = './EMG_data/thumb_scaled/normalization/test/'

    # Surface EMG generally leads limb motion by at least 30 ms; at a 2 kHz
    # sampling rate that corresponds to a dead zone of 60 samples.
    DEAD_ZONE = 10

    # Model hyper-parameters; see
    # https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html
    input_size = 16       # number of expected features in the input x
    hidden_size = 64      # number of features in the hidden state h
    num_layers = 3        # number of stacked recurrent layers
    droprate = 0.2        # NOTE(review): unused by MyNet -- confirm intent
    output_chan = 3
    data_step = 10
    sequence_length = 1000
    batch_size = 100
    # Because of batching, the hidden state from step t-1 is handed to step
    # t+batch_size rather than to t, i.e. the carried state lags by one batch.
    # In theory, with batch_size == sequence_length the returned hidden state
    # lines up exactly with the next batch's input.

    crit = nn.MSELoss()

    # nn.LSTM documents the initial state as a tuple (h_0, c_0); pass a tuple
    # rather than a list so the call matches the documented API.
    h0 = torch.zeros(num_layers, batch_size, hidden_size).to(device)
    c0 = torch.zeros(num_layers, batch_size, hidden_size).to(device)
    h_state = (h0, c0)

    if pre_train:
        # map_location keeps the checkpoint loadable on CPU-only machines
        # even when it was saved from a GPU run.
        model = torch.load('./model10.pth', map_location=device)
    else:
        # Fail fast: without a checkpoint, `model` would be undefined and the
        # loop below would die with a NameError at model.eval().
        raise RuntimeError('This test script requires a pretrained model; set pre_train = True')
    global_test_step = 0
    # NOTE(review): the TensorBoard step axis starts at 5289783/batch_size
    # instead of 0 -- looks like an offset to continue an earlier run; confirm.
    test_total_step = int(5289783 / batch_size)

    model.eval()  # hoisted out of the file loop: switching mode once suffices

    # --- test -------------------------------------------------------------
    with torch.no_grad():
        for f in mydp.findAllMat(TEST_FILE_PATH):  # one dataset file at a time
            temp_dataset = mydp.MyDataSet(TEST_FILE_PATH + f, sequence_length, 0, data_step)
            test_loader = Data.DataLoader(dataset=temp_dataset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          drop_last=True)
            sum_loss = 0
            total = 0
            for i, (mdata, labels) in enumerate(test_loader):
                # mdata is (batch, seq_len, input_size) since the LSTM was
                # built with batch_first=True.
                mdata = mdata.to(device)
                labels = labels.to(device)
                outputs, h_state = model(mdata, h_state)
                loss = crit(outputs, labels)

                global_test_step += 1
                # Summing over channels yields one curve reflecting overall accuracy.
                outputs_sum = outputs.sum(1)
                labels_sum = labels.sum(1)
                for sample in range(labels.shape[0]):
                    # Plot estimate vs. ground truth: summed and per channel.
                    writer.add_scalars('test_contrast', {
                        "output": outputs_sum[sample],
                        "raw": labels_sum[sample]
                    }, test_total_step)
                    writer.add_scalars('test_contrast1', {
                        "output": outputs[sample, 0],
                        "raw": labels[sample, 0]
                    }, test_total_step)
                    writer.add_scalars('test_contrast2', {
                        "output": outputs[sample, 1],
                        "raw": labels[sample, 1]
                    }, test_total_step)
                    writer.add_scalars('test_contrast3', {
                        "output": outputs[sample, 2],
                        "raw": labels[sample, 2]
                    }, test_total_step)
                    test_total_step += 1

    writer.close()
