# from __future__ import print_function
import torch
import torch.nn as nn
# from torch.nn.utils import weight_norm
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
import scipy.io as scio
import os
import numpy as np
import csv
import matplotlib.pyplot as plt
from scipy import signal
from sklearn import preprocessing
# from torchsummary import summary
# import torchvision
# import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class MyDataset(data.Dataset):
    """Dataset of raw EEG windows, filtered/resampled/normalised on the fly.

    Each element of ``raweeg`` is expected to be a (time, channels) array
    (1000 samples at the original 1 kHz rate) -- see ``preprocess_raweeg``,
    which emits transposed 1-second windows.
    """

    def __init__(self, raweeg, labels, fs=1000, cutoff_hz=80, resample_len=200):
        """
        Args:
            raweeg: sequence of (time, channels) float arrays.
            labels: sequence of integer class labels aligned with ``raweeg``.
            fs: sampling rate (Hz) of the raw signal (default 1000).
            cutoff_hz: low-pass cutoff frequency in Hz (default 80).
            resample_len: target number of time samples after downsampling.
        """
        self.eeg = raweeg
        self.labels = labels
        self.fs = fs
        self.resample_len = resample_len
        # 6th-order Butterworth low-pass; normalised cutoff = cutoff / (fs / 2).
        # (The original comment called this a band-pass filter; it is low-pass.)
        wn = 2 * cutoff_hz / fs
        self.b, self.a = signal.butter(6, wn, 'lowpass')

    def __getitem__(self, index):
        raw, label = self.eeg[index], self.labels[index]
        # NOTE(review): axis=0 subtracts each channel's temporal mean (DC
        # removal); a true common-average re-reference would subtract the
        # cross-channel mean (axis=1) -- confirm which was intended.
        centered = raw - np.mean(raw, axis=0)
        # Remove linear baseline drift along the time axis.
        detrended = signal.detrend(centered, axis=0)
        # Zero-phase low-pass filtering along time.
        filtered = signal.filtfilt(self.b, self.a, detrended, axis=0)
        # Downsample full-rate (fs-sample) epochs to ``resample_len`` points;
        # shorter windows pass through unchanged, as in the original.
        resampled = filtered
        if filtered.shape[0] == self.fs:
            resampled = signal.resample(filtered, self.resample_len)
        # Scale each channel into [-1, 1] by its maximum absolute value
        # (numpy equivalent of sklearn's maxabs_scale; zero columns are
        # left untouched instead of dividing by zero).
        max_abs = np.max(np.abs(resampled), axis=0)
        max_abs[max_abs == 0] = 1.0
        scaled = resampled / max_abs
        # .copy() drops the negative strides filtfilt can leave behind,
        # which torch tensors cannot consume.
        return scaled.astype(np.float32).copy(), label

    def __len__(self):
        return len(self.eeg)

class Cnn(nn.Module):
    """Four-layer 2-D CNN emotion classifier for (batch, 200, 62) EEG windows.

    Args:
        in_dim: number of EEG channels; kept for interface compatibility
            (the convolutions are channel-count agnostic, so it is unused).
        n_classes: number of output classes.
    """

    def __init__(self, in_dim, n_classes):
        super(Cnn, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, kernel_size=5)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(0.5)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=3)
        self.conv3 = nn.Conv2d(128, 64, kernel_size=3)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=3)
        # Flattened feature size for a 200x62 input: 128 channels x (1 x 10) map.
        self.fc1 = nn.Linear(128 * 10, 128)
        self.fc2 = nn.Linear(128, n_classes)

    def forward(self, x):
        """Return raw class logits for a (batch, time=200, channels=62) input.

        Bug fix: the original applied ``F.softmax`` here even though training
        uses ``nn.CrossEntropyLoss``, which applies log-softmax internally --
        softmaxing twice flattens the gradients. Raw logits are returned
        instead; apply softmax explicitly wherever probabilities are needed.
        Predicted classes (argmax) are unchanged, softmax being monotonic.
        """
        # (batch, time, channels) -> (batch, 1, channels, time) image-like input.
        x = x.permute(0, 2, 1)
        x = x.unsqueeze(1)
        x = self.maxpool(self.relu(self.conv1(x)))
        x = self.maxpool(self.relu(self.conv2(x)))
        x = self.maxpool(self.relu(self.conv3(x)))
        x = self.maxpool(self.relu(self.conv4(x)))
        x = x.view(x.size(0), -1)
        x = self.dropout(self.relu(self.fc1(x)))
        return self.fc2(x)
            


def read_file_name(path):
    """Return the names of regular files (not subdirectories) inside *path*.

    Bug fix: the original called ``os.path.isdir(file)`` on the bare entry
    name, which tests relative to the current working directory rather than
    *path*, so subdirectories of *path* slipped through. The entry must be
    joined with *path* before the check.
    """
    return [
        entry
        for entry in os.listdir(path)
        if not os.path.isdir(os.path.join(path, entry))
    ]


def preprocess_raweeg(path_name, debug=False, info_csv_path='/home/ubuntu/Shurly_H/info.csv'):
    """Slice raw EEG .mat recordings into labelled 1-second windows.

    Args:
        path_name: directory of ``<subject>_<experiment>_*.mat`` files, each
            holding a (channels, samples) array under the ``'raweeg'`` key.
        debug: when True, print the parsed metadata for each file.
        info_csv_path: CSV of per-recording start/end seconds, two rows per
            (subject, experiment) pair (previously hard-coded).

    Returns:
        (target_data, target_label): list of (1000, channels) windows and the
        matching labels shifted from {-1, 0, 1} to {0, 1, 2}.

    Raises:
        ValueError: if a file name encodes a subject index outside 1-15/22-29
            (the original printed an error and returned None, which made
            callers crash later when unpacking the result).
    """
    with open(info_csv_path, 'r') as f:
        data_csv = list(csv.reader(f))
    file_names = read_file_name(path_name)

    target_data = []
    target_label = []
    index = 0
    for fn in file_names:
        tmp_s = fn.split('.')[0].split('_')
        if tmp_s[0] == '':
            continue
        if debug:
            print(tmp_s)
        subject_index = int(tmp_s[0])
        exp_index = int(tmp_s[1])
        # Two CSV rows (start seconds / end seconds) per experiment, six rows
        # per subject. TODO(review): confirm this row formula also holds for
        # subjects 22-29 -- the CSV layout is not visible here.
        start_line = (subject_index - 1) * 6 + (exp_index - 1) * 2
        if 1 <= subject_index <= 15:
            n_trials = 15
            labels = [1, 0, -1, -1, 0, 1, -1, 0, 1, 1, 0, -1, 0, 1, -1]
        elif 22 <= subject_index <= 29:
            n_trials = 21
            labels = [1, -1, 0, -1, 1, 0, -1, 0, 1, -1, 0, 1, 1, 0, -1, -1, 0, 1, -1, 0, 1]
        else:
            raise ValueError('error: subject index illegal: %d (file %s)' % (subject_index, fn))
        start_point_list = [int(x) for x in data_csv[start_line][:n_trials]]
        end_point_list = [int(x) for x in data_csv[start_line + 1][:n_trials]]
        if debug:
            print(path_name + fn)
            print(start_point_list)
            print(end_point_list)
            print(labels)
        index = index + 1
        print(index)  # progress indicator: number of files processed so far
        raw = scio.loadmat(path_name + fn)['raweeg']

        for start_second, end_second, label in zip(start_point_list, end_point_list, labels):
            for i in range(start_second, end_second):
                # One second of data at 1 kHz, transposed to (time, channels).
                window = raw[:, i * 1000:(i + 1) * 1000]
                target_data.append(np.transpose(window))
                target_label.append(label + 1)  # map {-1, 0, 1} -> {0, 1, 2}
    return target_data, target_label


def train(net, trainloader, optimizer, criterion, valid_loader, scheduler,
          train_model_name='example_my_lstm.pkl', save_model=True,
          num_epochs=100, checkpoint_dir='/home/ubuntu/Shurly_H/models'):
    """Run the training/validation loop and collect per-epoch histories.

    Args:
        net: model to train; inputs are moved to the device its parameters
            already live on (no dependence on a module-level global).
        trainloader / valid_loader: DataLoaders yielding (inputs, targets).
        optimizer, criterion, scheduler: the usual torch training triplet.
        train_model_name: path for the final state_dict when ``save_model``.
        save_model: whether to save the final weights.
        num_epochs: number of epochs (previously hard-coded to 100).
        checkpoint_dir: directory for per-epoch checkpoints (previously a
            hard-coded path); pass None to skip per-epoch saving.

    Returns:
        (train_loss_history, train_acc_history,
         valid_loss_history, valid_acc_history) -- one float per epoch each.
    """
    device = next(net.parameters()).device
    train_epoch_loss_history = []
    train_epoch_acc_history = []
    valid_epoch_loss_history = []
    valid_epoch_acc_history = []
    for epoch in range(num_epochs):
        print("train Epoch {}/{}".format(epoch, num_epochs - 1))
        print("-" * 10)
        # get_last_lr() replaces the deprecated get_lr().
        print('目前epoch学习率:{}'.format(scheduler.get_last_lr()[0]))
        net.train()
        train_loss = 0.
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs = inputs.to(device)
            targets = targets.to(device)
            outputs = net(inputs)
            optimizer.zero_grad()
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            # Per-step running loss/accuracy, every 40 batches.
            if batch_idx % 40 == 0:
                print(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                      % (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))

        # Fix: since PyTorch 1.1 the LR scheduler must step AFTER the epoch's
        # optimizer updates, not before them (the original stepped at epoch
        # start, shifting the decay schedule by one epoch).
        scheduler.step()
        # max(..., 1) guards against empty loaders (batch_idx was previously
        # undefined in that case).
        train_epoch_loss_history.append(train_loss / max(len(trainloader), 1))
        train_epoch_acc_history.append(correct / max(total, 1))
        print(train_epoch_acc_history)

        print("valid Epoch {}/{}".format(epoch, num_epochs - 1))
        print("-" * 10)
        valid_loss = 0.
        valid_total = 0.
        valid_correct = 0.
        net.eval()
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(valid_loader):
                inputs = inputs.to(device)
                targets = targets.to(device)
                outputs = net(inputs)
                loss = criterion(outputs, targets)

                valid_loss += loss.item()
                _, predicted = outputs.max(1)
                valid_total += targets.size(0)
                valid_correct += predicted.eq(targets).sum().item()
                # Per-step running loss/accuracy, every 5 batches.
                if batch_idx % 5 == 0:
                    print(batch_idx, len(valid_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                          % (valid_loss / (batch_idx + 1), 100. * valid_correct / valid_total,
                             valid_correct, valid_total))

        valid_epoch_loss_history.append(valid_loss / max(len(valid_loader), 1))
        valid_epoch_acc_history.append(valid_correct / max(valid_total, 1))
        print(valid_epoch_loss_history)
        print(valid_epoch_acc_history)
        if checkpoint_dir is not None:
            torch.save(net.state_dict(), os.path.join(checkpoint_dir, '%d.pkl' % epoch))

    if save_model:
        torch.save(net.state_dict(), train_model_name)
    return (train_epoch_loss_history, train_epoch_acc_history,
            valid_epoch_loss_history, valid_epoch_acc_history)

def train_model(train_data_path='/home/ubuntu/huangd_test/TrainData/', save_model_path='example_my_lstm.pkl', valid_data_path='/home/ubuntu/huangd_test/TestData/'):
    """Build the data loaders, model and optimizer, then run the training loop.

    Args:
        train_data_path: directory of training .mat recordings.
        save_model_path: where the final model weights are saved.
        valid_data_path: directory of validation .mat recordings.

    Returns:
        The four per-epoch history lists produced by ``train``:
        (train_loss, train_acc, valid_loss, valid_acc).
    """
    # Preprocess raw EEG recordings into (window, label) datasets.
    train_samples, train_targets = preprocess_raweeg(train_data_path)
    train_loader = torch.utils.data.DataLoader(
        MyDataset(train_samples, train_targets), batch_size=128, shuffle=True)

    valid_samples, valid_targets = preprocess_raweeg(valid_data_path)
    valid_loader = torch.utils.data.DataLoader(
        MyDataset(valid_samples, valid_targets), batch_size=64, shuffle=True)

    # Model: 62 EEG channels in, 3 emotion classes out.
    net = Cnn(62, 3).to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=25, gamma=0.1)
    return train(net, train_loader, optimizer, criterion, valid_loader,
                 scheduler, save_model_path, True)
def _plot_history(train_loss, train_acc, valid_loss, valid_acc):
    """Plot the loss and accuracy curves for a completed training run."""
    # Derive the epoch count from the histories instead of hard-coding 100,
    # so the plots stay correct if the trainer's epoch count changes.
    epochs = len(train_loss)
    xs = range(1, epochs + 1)

    plt.title("Loss")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.plot(xs, train_loss, label="train")
    plt.plot(xs, valid_loss, label="valid")
    plt.ylim((0, train_loss[0] + 0.5))
    plt.xticks(np.arange(1, epochs + 1, 4.0))
    plt.legend()
    plt.grid()
    plt.show()

    plt.title("Accuracy")
    plt.xlabel("Epochs")
    plt.ylabel("Accuracy")
    plt.plot(xs, train_acc, label="train")
    plt.plot(xs, valid_acc, label="valid")
    plt.ylim((0.4, 0.8))
    plt.xticks(np.arange(1, epochs + 1, 5.0))
    plt.legend()
    plt.grid()
    plt.show()


if __name__ == "__main__":
    # Guarded entry point: the original ran training at import time, which
    # made the module unusable as a library.
    train_loss, train_acc, valid_loss, valid_acc = train_model()
    _plot_history(train_loss, train_acc, valid_loss, valid_acc)