import math
import sys
import pickle
import time
from torch.utils.data import Dataset, DataLoader, TensorDataset
from transformer import TransformerModel
import numpy as np
from typing import List, Tuple, Dict, Set, Union
from transformer import extract_k_b
import torch
import torch.nn.utils
from pathlib import Path
from config import data_dir, batch_size, clip_grad, output_dir, hidden_size, lr, epoches
            
from scipy import signal

import scipy
import csv
import os 
import scipy.io as scio
from sklearn import preprocessing
import matplotlib.pyplot as plt 

# Restrict CUDA to the GPU with index 1 (must be set before torch initializes CUDA).
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

def read_file_name(path):
    """Return the names of the regular files directly inside *path*.

    Subdirectories are skipped.  Bare file names (not full paths) are
    returned, in ``os.listdir`` order.
    """
    names = []
    for entry in os.listdir(path):
        # Join with the parent directory before the isdir check: the original
        # tested the bare name relative to the current working directory, so
        # subdirectories were only filtered out when CWD happened to be *path*.
        if not os.path.isdir(os.path.join(path, entry)):
            names.append(entry)
    return names

def preprocess_raweeg(path_name, debug=False):
    """Slice raw EEG ``.mat`` recordings into one-second labelled segments.

    Trial start/end seconds are read from ``./info.csv`` (two rows per
    experiment: starts then ends).  Each file under *path_name* is expected
    to be named ``<subject>_<experiment>.mat`` and to contain a ``raweeg``
    array of shape (channels, samples); segments of 1000 samples are cut,
    which assumes a 1 kHz sampling rate — TODO confirm.

    Returns ``(segments, labels)`` where each segment is a
    (1000, channels) array and labels are remapped from {-1, 0, 1} to
    {0, 1, 2}.  Returns ``None`` (after printing an error) when a subject
    index outside the supported ranges is seen — note the caller unpacks
    two values, so this aborts with a TypeError there.
    """
    # Context manager replaces the original open/close pair so the handle
    # is released even if parsing raises.
    with open('./info.csv', 'r') as f:
        data_csv = [row for row in csv.reader(f)]
    file_names = read_file_name(path_name)

    target_data = []
    target_label = []
    index = 0
    for fn in file_names:
        tmp_s = fn.split('.')[0].split('_')
        if tmp_s[0] == '':
            continue  # skip hidden files such as ".DS_Store"
        if debug:
            print(tmp_s)
        subject_index = int(tmp_s[0])
        exp_index = int(tmp_s[1])
        # Each subject occupies 6 csv rows: 3 experiments x 2 rows each.
        start_line = (subject_index - 1) * 6 + (exp_index - 1) * 2
        if 1 <= subject_index <= 15:
            # Subjects 1-15: 15 trials per experiment.
            start_point_list = [int(x) for x in data_csv[start_line][:15]]
            end_point_list = [int(x) for x in data_csv[start_line + 1][:15]]
            labels = [1, 0, -1, -1, 0, 1, -1, 0, 1, 1, 0, -1, 0, 1, -1]
        elif 22 <= subject_index <= 29:
            # Subjects 22-29: 21 trials per experiment.
            start_point_list = [int(x) for x in data_csv[start_line][:21]]
            end_point_list = [int(x) for x in data_csv[start_line + 1][:21]]
            labels = [1, -1, 0, -1, 1, 0, -1, 0, 1, -1, 0, 1, 1, 0, -1, -1, 0, 1, -1, 0, 1]
        else:
            print('error: subject index illegal')
            return
        if debug:
            print(path_name + fn)
            print(start_point_list)
            print(end_point_list)
            print(labels)
        index = index + 1
        print(index)
        data = scio.loadmat(path_name + fn)['raweeg']

        for start_second, end_second, label in zip(start_point_list, end_point_list, labels):
            for i in range(start_second, end_second):
                # One-second window: samples [i*1000, (i+1)*1000).
                tmp_data = data[:, i * 1000:(i + 1) * 1000]
                # Transpose to (time, channels) for the dataset's filters.
                transpose_data = np.transpose(tmp_data)
                target_data.append(transpose_data)
                target_label.append(label + 1)  # map -1/0/1 -> 0/1/2
    return target_data, target_label

class EegDataset(Dataset):
    """Dataset of raw EEG segments, preprocessed lazily on access.

    Each item is band-pass filtered, detrended, re-referenced, then
    summarized as piecewise-linear (slope, intercept) features via
    ``extract_k_b``.

    src_data : sequence of (time, channels) arrays — one second at 1 kHz,
        judging by the filter design below; TODO confirm.
    tgt_data : integer class labels aligned with ``src_data``.
    """

    def __init__(self, src_data, tgt_data):
        # The original called super(EegDataset).__init__(), which only
        # initializes an unbound super proxy and never reaches the
        # Dataset base class; use the standard zero-argument form.
        super().__init__()
        self.src_data = src_data
        self.tgt_data = tgt_data
        # 6th-order Butterworth band-pass, 14-50 Hz assuming fs = 1000 Hz
        # (critical frequencies are normalized to the Nyquist rate).
        wn = [2 * 14 / 1000, 2 * 50 / 1000]
        self.b, self.a = signal.butter(6, wn, btype="band")

    def __getitem__(self, i):
        data, label = self.src_data[i], self.tgt_data[i]
        # Zero-phase band-pass filtering along the time axis.
        filted_data = signal.filtfilt(self.b, self.a, data, axis=0)
        # Remove linear trend (baseline drift) from the filtered signal.
        detrend_data = signal.detrend(filted_data, axis=0)
        # NOTE(review): the per-channel mean of the *detrended* signal is
        # subtracted from the *raw* data, so the filtering itself is not
        # propagated — confirm this is intentional.
        avg_data = data - np.mean(detrend_data, axis=0)
        data = torch.tensor(avg_data, dtype=torch.float32)
        data = data.T  # -> (channels, time)
        # Fit 50 line segments per channel; features concatenate the
        # slopes and intercepts, one row per segment.
        k, b = extract_k_b(data, size=50)
        features = torch.cat((k, b), dim=0).T
        return features, label

    def __len__(self):
        return len(self.src_data)

def get_right_num(pred, tgt):
    """Count samples whose argmax prediction matches the target.

    pred : (batch, num_classes) scores/logits.
    tgt  : integer class labels, any shape that flattens to (batch,).
    Returns the number of correct predictions as a Python int.
    """
    pred_arg = torch.argmax(pred, dim=1).view(-1)
    # Vectorized equality replaces the original per-element Python loop;
    # the result is identical.
    return int((pred_arg == tgt.view(-1)).sum().item())

def get_accuracy(pred, tgt):
    """Fraction of samples whose argmax prediction matches the target.

    pred : (batch, num_classes) scores/logits.
    tgt  : integer class labels, any shape that flattens to (batch,).
    Returns accuracy in [0, 1] as a Python float.
    """
    sum_sample = len(pred)
    pred_arg = torch.argmax(pred, dim=1).view(-1)
    # Vectorized comparison replaces the original per-element Python loop.
    right = int((pred_arg == tgt.view(-1)).sum().item())
    return float(right) / sum_sample

def train(src_path, tgt_path, test_src_path=None, test_tgt_path=None):
    """Train a TransformerModel on EEG segments found under *src_path*.

    src_path / test_src_path : directories of raw .mat recordings, consumed
        by preprocess_raweeg.
    tgt_path / test_tgt_path : kept for interface compatibility; labels are
        actually produced inside preprocess_raweeg.  A test set is used only
        when both test paths are given.

    Side effects: saves a model checkpoint whenever test accuracy improves
    and rewrites ./transformer_res.txt with accuracy/loss curves each epoch.
    Runs for `epoches` (from config) epochs.
    """
    src_data, tgt_data = preprocess_raweeg(src_path)

    dataset = EegDataset(src_data, tgt_data)
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    is_test = 0
    test_dataloader = None
    if test_src_path is not None and test_tgt_path is not None:
        is_test = 1
        test_src_data, test_tgt_data = preprocess_raweeg(test_src_path)
        test_dataset = EegDataset(test_src_data, test_tgt_data)
        test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"using device: {device}")

    model = TransformerModel(device=device)
    model.train()
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    begin_time = time.time()
    train_iter = report_loss = 0
    test_iter = 0
    epoch = report_examples = 0
    log_every = 100
    last_acc = 0

    train_acc_list = []
    test_acc_list = []
    train_loss_list = []
    test_loss_list = []

    while True:
        epoch += 1
        model.train()
        for src_d, tgt_d in data_loader:
            # Prepend an all-ones token per sample — presumably a dedicated
            # aggregation ("CLS"-style) position for the model; TODO confirm.
            batch, feature_size = src_d.shape[0], src_d.shape[-1]
            add_tensor = torch.ones((batch, 1, feature_size))
            src_d = torch.cat((add_tensor, src_d), dim=1)

            train_iter += 1
            optimizer.zero_grad()
            pred, loss = model(src_d, tgt_d)
            batch_loss = loss.item()
            loss.backward()
            # clip gradient
            # grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
            optimizer.step()

            report_loss += batch_loss
            report_examples += batch_size

            if train_iter % log_every == 0:
                print('epoch %d, iter %d, avg. loss %.5f ' \
                      'time elapsed %.2f sec' % (epoch, train_iter,
                                                report_loss / report_examples,
                                                time.time() - begin_time), file=sys.stderr)
                report_loss = report_examples = 0.

        # End-of-epoch evaluation, only when a test set was provided.
        if is_test == 1:
            model.eval()
            # Training-set accuracy/loss, measured in eval mode.  no_grad
            # skips autograd bookkeeping; outputs are unchanged.
            train_right = 0
            train_size = len(dataset)
            train_report_loss = 0
            with torch.no_grad():
                for src_d, tgt_d in data_loader:
                    batch, feature_size = src_d.shape[0], src_d.shape[-1]
                    add_tensor = torch.ones((batch, 1, feature_size))
                    src_d = torch.cat((add_tensor, src_d), dim=1)
                    pred, loss = model(src_d, tgt_d)
                    train_report_loss += loss.item()
                    train_right += get_right_num(pred, tgt_d)
            train_acc = float(train_right) / train_size
            print("训练集准确率为：%f" % train_acc)
            train_acc_list.append((epoch, train_acc))
            train_loss_list.append((epoch, train_report_loss))

            # Test-set accuracy/loss.
            test_right = 0
            test_size = len(test_dataset)
            test_report_loss = 0
            with torch.no_grad():
                for test_src, test_tgt in test_dataloader:
                    batch, feature_size = test_src.shape[0], test_src.shape[-1]
                    add_tensor = torch.ones((batch, 1, feature_size))
                    test_src = torch.cat((add_tensor, test_src), dim=1)
                    test_iter += 1
                    pred, loss = model(test_src, test_tgt)
                    test_right += get_right_num(pred, test_tgt)
                    test_report_loss += loss.item()

            test_loss_list.append((epoch, test_report_loss))
            test_acc = float(test_right) / test_size
            print("测试集准确率为：%f" % test_acc)
            test_acc_list.append((epoch, test_acc))

            # Checkpoint only on an improvement in test accuracy.  This block
            # was originally outside the is_test guard and referenced
            # test_acc, raising NameError when no test set was supplied.
            if last_acc < test_acc:
                torch.save(model.state_dict(), "model_dict_transformer_" + str(epoch) + ".pkl")
                last_acc = test_acc

        # Persist the curves so far (rewritten every epoch).
        with open("./transformer_res.txt", "w") as f:
            f.write(str({"train_acc": train_acc_list, "test_acc": test_acc_list, "train_loss": train_loss_list, "test_loss": test_loss_list}))

        model.train()
        if epoch == int(epoches):
            print('reached maximum number of epochs!')
            break
            
    
if __name__ == "__main__":
    # Train on ./TrainData/ and evaluate on ./TestData/.  The two pickle
    # paths are passed for the tgt_path parameters but labels are actually
    # derived inside preprocess_raweeg.
    train("./TrainData/", "./data/train_tgt_data_raw_b.pkl", "./TestData/", "./data/test_tgt_data_raw_b.pkl")