#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description:       :
@Date     :2022/05/10 21:36:28
@Author      :Cosecant
@version      :1.0
                    _ooOoo_                     
                   o8888888o                    
                  88   .   88                    
                   (| -_- |)                    
                   O\  =  /O                    
                ____/`---'\____                 
              .'   \|     |/   `.               
             /   \|||  :  |||/   \              
            /  _||||| -:- |||||_  \             
            |   | \ \  -  /// |   |             
            | \_|  ''\---/''  |_/ |             
            \  .-\__  `-`  ___/-. /             
          ___`. .'  /--.--\  `. . __            
       .'' '<  `.___\_<|>_/___.'  >' ''.         
      | | :  `- \`.;`\ _ /`;.`/ - ` : | |       
      \  \ `-.   \_ __\ /__ _/   .-` /  /       
 ======`-.____`-.___\_____/___.-`____.-'======   
                    `=---='                     
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^   
        佛祖保佑        永无BUG                  
'''
from torch.utils.tensorboard import SummaryWriter
import datetime
import torch
import torch.nn as nn
import torch.utils.data as Data
import scipy.io as sio
import utils.data_preparation as mydp
import utils.func as myfunc

# Run on GPU when CUDA is available, otherwise fall back to CPU;
# all tensors and the model are moved to this device below.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class MyNet(nn.Module):
    """1-D CNN classifier for fixed-length EMG sequences.

    Input:  tensor of shape (batch, input_size, sequence_length)
    Output: tensor of shape (batch, class_num) — raw class scores (logits).

    An LSTM head existed in an earlier revision; ``hidden_size`` and
    ``num_layers`` are retained so existing callers keep working.
    """

    def __init__(self, input_size, hidden_size, num_layers, class_num,
                 droprate=0.2, sequence_length=3000):
        """
        Args:
            input_size (int): number of input channels (EMG electrodes).
            hidden_size (int): unused by the CNN-only path; kept for
                interface compatibility with the removed LSTM variant.
            num_layers (int): likewise unused; kept for compatibility.
            class_num (int): number of output classes.
            droprate (float): dropout probability in the conv stack.
                Bug fix: this was previously read from a module-level
                global, raising NameError unless the caller's module
                happened to define ``droprate`` first.
            sequence_length (int): temporal length of one input window.
                The conv stack is length-preserving (kernel 3, stride 1,
                padding 1), so the classifier head maps exactly
                ``sequence_length`` features to ``class_num`` scores.
                Default 3000 matches the original hard-coded value.
        """
        super(MyNet, self).__init__()
        self.hidden_size = hidden_size  # retained for backward compatibility
        self.num_layers = num_layers    # retained for backward compatibility
        self.cnn = nn.Sequential(
            nn.Conv1d(input_size, 64, 3, stride=1, padding=1),
            nn.BatchNorm1d(64),
            nn.LeakyReLU(),
            nn.Conv1d(64, 128, 3, stride=1, padding=1),
            nn.BatchNorm1d(128),
            nn.Dropout(droprate),
            nn.LeakyReLU(),
            # Collapse the 128 feature maps to one channel so flattening
            # yields exactly ``sequence_length`` features per sample.
            nn.Conv1d(128, 1, 3, stride=1, padding=1))
        self.fc = nn.Linear(sequence_length, class_num)

    def forward(self, x):
        """Map (batch, input_size, seq_len) input to (batch, class_num) logits."""
        cnn_out = self.cnn(x)         # (batch, 1, seq_len)
        cnn_out = cnn_out.flatten(1)  # (batch, seq_len)
        return self.fc(cnn_out)


if __name__ == '__main__':
    # One TensorBoard run directory per invocation, timestamped.
    writer = SummaryWriter('runs/test/'
                           + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S/'))
    # config
    pre_train = True

    # Preprocessed NinaPro DB8 dataset:
    #   ./EMG_data/train/ holds all *A1 & *A2 files
    #   ./EMG_data/test/  holds all *A3 files
    TRAIN_FILE_PATH = './EMG_data/train/'
    TEST_FILE_PATH = './EMG_data/test/'
    DEAD_ZONE = 0

    # Model hyper-parameters
    # (LSTM reference: https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html)
    input_size = 16        # expected features per timestep in the input x
    hidden_size = 32       # hidden-state size (LSTM variant; unused by CNN)
    num_layers = 3         # stacked recurrent layers (LSTM variant; unused by CNN)
    droprate = 0.2
    class_num = 10
    sequence_length = 3000
    window_step = 10       # sliding-window stride; must be < sequence_length
    batch_size = 128
    # NOTE: with batching, the hidden state of step t-1 would reach step
    # t+batch_size, i.e. a response lag; batch_size == sequence_length would
    # let one batch's final hidden state feed the next (LSTM variant only).

    num_epochs = 200
    learning_rate = 0.0001

    if pre_train:
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources. map_location makes the checkpoint
        # loadable on CPU-only machines as well.
        model = torch.load('./model_backup/model10.pth', map_location=device)
    else:
        # Bug fix: `model` was previously undefined on this path, crashing
        # with NameError at the optimizer construction below.
        model = MyNet(input_size, hidden_size, num_layers, class_num).to(device)

    global_test_step = 0
    global_accuracy = 0.0
    test_total_step = 0

    # Loss/optimizer are unused in this evaluation-only script but kept so
    # the training setup stays in one place.
    # weights = [0.1, 9, 9, 9, 9, 9, 9, 9, 9, 9]
    # class_weights = torch.FloatTensor(weights).to(device)
    crit = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # test
    model.eval()  # hoisted out of the file loop — eval mode never changes here
    with torch.no_grad():
        for f in mydp.findAllMat(TEST_FILE_PATH):        # iterate test files
            matData = sio.loadmat(TEST_FILE_PATH + f)    # load one .mat file
            temp_dataset = mydp.MyDataSet4(matData, sequence_length, window_step)
            test_loader = Data.DataLoader(dataset=temp_dataset,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)
            for i, (mdata, labels) in enumerate(test_loader):
                # DataLoader yields (batch, seq_len, channels); Conv1d wants
                # (batch, channels, seq_len).
                mdata = mdata.permute(0, 2, 1).to(device)
                labels = labels.long().squeeze().to(device)
                outputs = model(mdata)
                predictions = torch.argmax(outputs, dim=1)
                # .item() keeps the accumulator a plain float instead of
                # silently building up a 0-dim tensor (the original bug).
                accuracy = (predictions == labels).float().mean().item()
                global_accuracy += accuracy
                global_test_step += 1
                # Log prediction vs. label per sample; use the actual batch
                # length rather than assuming it equals batch_size.
                for foobar in range(labels.shape[0]):
                    writer.add_scalars('contrast', {
                        'output': predictions[foobar],
                        'label': labels[foobar]
                    }, test_total_step)
                    test_total_step += 1
    # Guard against division by zero when no test batches were found.
    if global_test_step:
        print("accuracy=:" + str(global_accuracy / global_test_step))
    else:
        print("no test batches found under " + TEST_FILE_PATH)
    writer.close()