#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description:       :
@Date     :2022/05/15 00:41:40
@Author      :Cosecant
@version      :1.0
                    _ooOoo_                     
                   o8888888o                    
                  88   .   88                    
                   (| -_- |)                    
                   O\  =  /O                    
                ____/`---'\____                 
              .'   \|     |/   `.               
             /   \|||  :  |||/   \              
            /  _||||| -:- |||||_  \             
            |   | \ \  -  /// |   |             
            | \_|  ''\---/''  |_/ |             
            \  .-\__  `-`  ___/-. /             
          ___`. .'  /--.--\  `. . __            
       .'' '<  `.___\_<|>_/___.'  >' ''.         
      | | :  `- \`.;`\ _ /`;.`/ - ` : | |       
      \  \ `-.   \_ __\ /__ _/   .-` /  /       
 ======`-.____`-.___\_____/___.-`____.-'======   
                    `=---='                     
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^   
        佛祖保佑        永无BUG                  
'''
from torch.utils.tensorboard import SummaryWriter
import datetime
import torch
import torch.nn as nn
import torch.utils.data as Data
import scipy.io as sio
import utils.data_preparation as mydp
import utils.func as myfunc

# Pick the compute device once at import time: CUDA when available, else CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class MyNet(nn.Module):
    """1-D CNN classifier for surface-EMG windows.

    Input:  tensor of shape (batch, input_size, sequence_length),
            channel-first as required by ``nn.Conv1d``.
    Output: tensor of shape (batch, class_num) — unnormalised logits
            (feed to ``nn.CrossEntropyLoss``).

    ``hidden_size`` and ``num_layers`` are kept only for interface
    compatibility with an earlier LSTM variant of this model; the
    current CNN head does not use them.
    """

    def __init__(self, input_size, hidden_size, num_layers, class_num,
                 droprate=0.2, sequence_length=3000):
        """Build the network.

        :param input_size: number of input channels (EMG electrodes).
        :param hidden_size: unused; retained for LSTM-variant compatibility.
        :param num_layers: unused; retained for LSTM-variant compatibility.
        :param class_num: number of output classes.
        :param droprate: dropout probability (was read from a module-level
            global defined only under ``__main__``, which made importing
            this class raise NameError; now an explicit parameter with the
            script's original value as default).
        :param sequence_length: temporal length of each input window
            (was hard-coded as 3000 in the final linear layer).
        """
        super(MyNet, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # Three same-padded (kernel 3, padding 1) convolutions keep the
        # temporal length unchanged while mapping channels
        # input_size -> 64 -> 128 -> 1.
        self.cnn = nn.Sequential(
            nn.Conv1d(input_size, 64, 3, stride=1, padding=1),
            nn.BatchNorm1d(64),
            nn.LeakyReLU(),
            nn.Conv1d(64, 128, 3, stride=1, padding=1),
            nn.BatchNorm1d(128),
            nn.Dropout(droprate),
            nn.LeakyReLU(),
            nn.Conv1d(128, 1, 3, stride=1, padding=1))
        # The single remaining channel of length `sequence_length` is
        # flattened and projected onto the class scores.
        self.fc = nn.Linear(sequence_length, class_num)

    def forward(self, x):
        """Return (batch, class_num) logits for x of shape
        (batch, input_size, sequence_length)."""
        cnn_out = self.cnn(x)
        cnn_out = cnn_out.flatten(1)  # (batch, 1, L) -> (batch, L)
        return self.fc(cnn_out)


if __name__ == '__main__':
    writer = SummaryWriter(
        'runs/' + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S/'))
    # config
    pre_train = False

    # Preprocessed NinaPro DB8 dataset:
    #   ./EMG_data/train/ holds all *A1 & *A2 recordings
    #   ./EMG_data/test/  holds all *A3 recordings
    TRAIN_FILE_PATH = './EMG_data/train/'
    TEST_FILE_PATH = './EMG_data/test/'

    # sEMG generally leads limb motion by at least 30 ms; at a 2 kHz
    # sampling rate that would be a 60-sample dead zone (disabled here).
    DEAD_ZONE = 0

    # Model hyper-parameters (LSTM reference:
    # https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html)
    input_size = 16   # expected features per time step (EMG channels)
    hidden_size = 32  # kept for the (disabled) LSTM variant
    num_layers = 3    # kept for the (disabled) LSTM variant
    droprate = 0.2
    class_num = 10
    sequence_length = 3000
    window_step = 10  # must be < sequence_length
    batch_size = 512
    # With batching, the hidden state of step t-1 would reach step
    # t+batch_size rather than t, i.e. a response lag; in theory
    # batch_size == sequence_length lets the returned hidden state feed
    # the next batch exactly (relevant only to the LSTM variant).

    num_epochs = 200
    learning_rate = 0.0001

    model = MyNet(input_size, hidden_size, num_layers, class_num).to(device)

    # Loss / optimizer.  Adam already incorporates RMSProp-style
    # per-parameter scaling, which is said to suit recurrent nets.
    crit = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    if pre_train:
        # NOTE(review): load_checkpoint is project code; presumably it
        # restores model and optimizer state — confirm its contract.
        model = myfunc.load_checkpoint(torch.load('model.pth'), optimizer)

    # Rough progress denominators; the magic numbers are the total sample
    # counts of the preprocessed dataset, hard-coded upstream.
    train_total_batch_step = int(45656806 / batch_size / 10 / 10 / 20)
    test_total_batch_step = int(5289783 / batch_size / 10 / 10)

    running_loss = 0.0
    running_acc = 0.0
    global_train_step = 0
    train_total_step = 0
    test_total_step = 0

    for epoch in range(num_epochs):
        print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        for f in mydp.findAllMat(TRAIN_FILE_PATH):  # one .mat file at a time
            matData = sio.loadmat(TRAIN_FILE_PATH + f)
            # BUG FIX: the dataset construction and the whole training loop
            # below were indented OUTSIDE this for-loop, so only the last
            # .mat file was ever trained on.  Everything now runs per file,
            # mirroring the structure of the evaluation loop.
            temp_dataset = mydp.MyDataSet4(matData, sequence_length,
                                           window_step)
            j = 0
            batch_num = 0
            train_loader = Data.DataLoader(dataset=temp_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           drop_last=True,
                                           num_workers=0)
            model.train()
            for i, (mdata, labels) in enumerate(train_loader):
                # (batch, seq_len, channels) -> (batch, channels, seq_len)
                mdata = mdata.permute(0, 2, 1).to(device)
                labels = labels.long().squeeze().to(device)

                # forward pass
                outputs = model(mdata)
                loss = crit(outputs, labels)
                # backward pass
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # progress / logging
                predictions = torch.argmax(outputs, dim=1)
                accuracy = (torch.sum(predictions == labels) /
                            labels.shape[0]).item()
                writer.add_scalar('predictions',
                                  predictions[0],
                                  global_step=global_train_step)
                running_acc += accuracy
                running_loss += loss.item()
                batch_num += 1
                global_train_step += 1
                j += 1
                for k in range(batch_size):
                    # prediction vs. ground truth, per sample
                    writer.add_scalars('contrast2', {
                        'output': predictions[k],
                        'label': labels[k]
                    }, train_total_step)
                    train_total_step += 1
                writer.add_scalar('train_loss',
                                  loss.item(),
                                  global_step=global_train_step)
                writer.add_scalar('Epoch',
                                  epoch + 1,
                                  global_step=global_train_step)
                writer.add_scalar('accuracy', running_acc / batch_num,
                                  global_train_step)
                print('Epoch [{}/{}], Step [{}/{}], Accuracy:{:.2f}, Loss: {:.4f}'.
                      format(epoch + 1, num_epochs, j + 1,
                             train_total_batch_step,
                             running_acc / batch_num,
                             running_loss / batch_num))
                # counters are reset each batch, so the logged values are
                # per-batch (the periodic `% 10` / `% 100` gates were
                # already commented out in the original)
                running_loss = 0.0
                running_acc = 0.0
                batch_num = 0

        # checkpoint.  BUG FIX: the original condition was
        # `epoch % 3 == 4`, which can never be true (epoch % 3 is 0..2),
        # so no checkpoint was ever written; save every 3 epochs instead.
        if (epoch + 1) % 3 == 0:
            torch.save(model, 'model' + str(epoch + 1) + '.pth')

        # evaluation
        with torch.no_grad():
            global_accuracy = 0.0
            global_test_step = 0
            model.eval()
            for f in mydp.findAllMat(TEST_FILE_PATH):  # one .mat file at a time
                matData = sio.loadmat(TEST_FILE_PATH + f)
                temp_dataset2 = mydp.MyDataSet4(matData, sequence_length,
                                                window_step)
                test_loader = Data.DataLoader(dataset=temp_dataset2,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              drop_last=True)
                for i, (mdata, labels) in enumerate(test_loader):
                    # (batch, seq_len, channels) -> (batch, channels, seq_len)
                    mdata = mdata.permute(0, 2, 1).to(device)
                    labels = labels.long().squeeze().to(device)
                    outputs = model(mdata)
                    predictions = torch.argmax(outputs, dim=1)
                    global_accuracy += (torch.sum(predictions == labels) /
                                        labels.shape[0]).item()
                    global_test_step += 1
                    for k in range(batch_size):
                        # prediction vs. ground truth, per sample
                        writer.add_scalars('contrast', {
                            'output': predictions[k],
                            'label': labels[k]
                        }, test_total_step)
                        test_total_step += 1
            # guard against an empty test directory (ZeroDivisionError)
            if global_test_step:
                print("accuracy=:" + str(global_accuracy / global_test_step))
    writer.close()
