import math
import sys
import pickle
import time
from torch.utils.data import Dataset, DataLoader, TensorDataset
from model import Cnn2DLarge
import numpy as np
from typing import List, Tuple, Dict, Set, Union
import torch
import torch.nn.utils
from pathlib import Path
from config import data_dir, batch_size, clip_grad, output_dir, hidden_size, lr, epoches
            
from scipy import signal

import scipy
import csv
import os 
import scipy.io as scio
from sklearn import preprocessing
import matplotlib.pyplot as plt 

# Pin this process to the second GPU before torch initialises CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

def read_file_name(path):
    """Return the names of the regular files directly inside *path*.

    Directories are excluded.  The returned names are bare file names
    (no directory prefix), in ``os.listdir`` order.
    """
    names = []
    for name in os.listdir(path):
        # Join with *path*: the original tested os.path.isdir(name) relative
        # to the current working directory, so sub-directories of *path*
        # were silently mis-classified as files.
        if not os.path.isdir(os.path.join(path, name)):
            names.append(name)
    return names

def preprocess_raweeg(path_name, debug=False):
    """Slice raw EEG ``.mat`` recordings under *path_name* into 1 s windows.

    File names are expected to look like ``<subject>_<experiment>...``.
    Per-trial start/end seconds are read from ``./info.csv``, which holds
    two consecutive rows per experiment (start seconds, then end seconds).
    Each window of 1000 samples (1 s at an assumed 1 kHz sampling rate —
    TODO confirm) becomes one (1000, n_channels) array.

    Args:
        path_name: directory containing the ``.mat`` files; each must hold
            a ``raweeg`` matrix shaped (channels, samples).
        debug: when True, print parsed file names and trial timing lists.

    Returns:
        (target_data, target_label): parallel lists of transposed windows
        and their class labels shifted from {-1, 0, 1} to {0, 1, 2}.
        Implicitly returns ``None`` on an illegal subject index, which
        raises at the caller's tuple unpacking — NOTE(review): consider
        raising ``ValueError`` instead.
    """
    # Load the trial timing table once, closing the file deterministically.
    with open('./info.csv', 'r') as f:
        data_csv = list(csv.reader(f))
    file_names = read_file_name(path_name)

    target_data = []
    target_label = []
    index = 0
    for fn in file_names:
        tmp_s = fn.split('.')[0].split('_')
        if tmp_s[0] == '':
            # Hidden files such as ".DS_Store" split to an empty first token.
            continue
        if debug:
            print(tmp_s)
        subject_index = int(tmp_s[0])
        exp_index = int(tmp_s[1])
        # 6 csv rows per subject (3 experiments x 2 rows each).
        start_line = int((subject_index - 1) * 6 + (exp_index - 1) * 2)
        if 1 <= subject_index <= 15:
            # Subjects 1-15: 15 trials per experiment.
            start_point_list = [int(x) for x in data_csv[start_line][:15]]
            end_point_list = [int(x) for x in data_csv[start_line + 1][:15]]
            labels = [1, 0, -1, -1, 0, 1, -1, 0, 1, 1, 0, -1, 0, 1, -1]
        elif 22 <= subject_index <= 29:
            # Subjects 22-29: 21 trials per experiment.
            start_point_list = [int(x) for x in data_csv[start_line][:21]]
            end_point_list = [int(x) for x in data_csv[start_line + 1][:21]]
            labels = [1, -1, 0, -1, 1, 0, -1, 0, 1, -1, 0, 1, 1, 0, -1, -1, 0, 1, -1, 0, 1]
        else:
            print('error: subject index illegal')
            return
        full_path = os.path.join(path_name, fn)
        if debug:
            print(full_path)
            print(start_point_list)
            print(end_point_list)
            print(labels)
        index = index + 1
        print(index)  # progress: number of files processed so far
        data = scio.loadmat(full_path)['raweeg']

        for start_second, end_second, label in zip(start_point_list, end_point_list, labels):
            # One training sample per whole second of the trial.
            for i in range(start_second, end_second):
                tmp_data = data[:, i * 1000:(i + 1) * 1000]
                # Transpose to (samples, channels) for downstream axis=0 ops.
                transpose_data = np.transpose(tmp_data)
                target_data.append(transpose_data)
                target_label.append(label + 1)  # map labels -1/0/1 -> 0/1/2
    return target_data, target_label

class EegDataset(Dataset):
    """Dataset of raw EEG windows with on-the-fly preprocessing.

    Each item is mean-subtracted, detrended, low-pass filtered
    (6th-order Butterworth, 80 Hz cutoff at an assumed 1 kHz sampling
    rate — TODO confirm), resampled to 334 time points, and max-abs
    scaled per channel.
    """

    def __init__(self, src_data, tgt_data):
        # Fix: the original called super(EegDataset).__init__(), which
        # initialises the unbound super proxy itself and never reaches
        # Dataset.__init__.
        super().__init__()
        self.src_data = src_data  # list of (n_samples, n_channels) arrays
        self.tgt_data = tgt_data  # list of integer class labels
        # Normalised cutoff frequency: 80 Hz low-pass at 1 kHz sampling.
        wn = 2 * 80 / 1000
        self.b, self.a = signal.butter(6, wn, btype="low")

    def __getitem__(self, i):
        """Return ``(tensor, label)`` for sample *i*.

        The tensor has shape (334, n_channels), dtype float32.
        """
        data, label = self.src_data[i], self.tgt_data[i]

        # "Average re-reference" per the original comment; note this
        # subtracts the per-channel temporal mean (axis=0), not the
        # cross-channel mean — NOTE(review): confirm the intended axis.
        avg_data1 = data - np.mean(data, axis=0)

        # Remove linear baseline drift along the time axis.
        detrend_data1 = signal.detrend(avg_data1, axis=0)

        # Zero-phase low-pass filtering (no phase distortion).
        filted_data1 = signal.filtfilt(self.b, self.a, detrend_data1, axis=0)

        # Downsample to 334 time points (~1000/3).
        resampled_data = signal.resample(filted_data1, 334)

        # Per-channel max-abs normalisation to [-1, 1].
        res_data1 = preprocessing.maxabs_scale(resampled_data, axis=0, copy=True)

        return torch.tensor(res_data1, dtype=torch.float32), label

    def __len__(self):
        return len(self.src_data)

def get_right_num(pred, tgt):
    """Count predictions whose argmax class matches the target.

    Args:
        pred: (batch, n_classes) tensor of scores/logits.
        tgt: tensor of integer class labels; any shape flattening to batch.

    Returns:
        int: number of correct predictions in the batch.
    """
    # Vectorised replacement for the original per-element Python loop
    # (one tensor comparison instead of 2*batch .item() calls).
    pred_arg = torch.argmax(pred, dim=1).view(-1)
    return int((pred_arg == tgt.view(-1)).sum().item())

def get_accuracy(pred, tgt):
    """Return the fraction of predictions whose argmax matches the target.

    Args:
        pred: (batch, n_classes) tensor of scores/logits.
        tgt: tensor of integer class labels; any shape flattening to batch.

    Returns:
        float: accuracy in [0, 1].
    """
    sum_sample = len(pred)
    # Vectorised replacement for the original per-element Python loop.
    pred_arg = torch.argmax(pred, dim=1).view(-1)
    right = int((pred_arg == tgt.view(-1)).sum().item())
    return float(right) / sum_sample

def train(src_path, tgt_path, test_src_path=None, test_tgt_path=None):
    """Train Cnn2DLarge on raw EEG windows loaded from *src_path*.

    Args:
        src_path: directory of training ``.mat`` files (see preprocess_raweeg).
        tgt_path: unused here — labels come from preprocess_raweeg; kept for
            interface compatibility.  NOTE(review): confirm it can be removed.
        test_src_path: optional directory of test ``.mat`` files.
        test_tgt_path: optional; test evaluation runs only when both test
            paths are provided.

    Side effects:
        Saves the model state dict to ``model_dict_cnn_large_<epoch>.pkl``
        whenever test accuracy improves, and rewrites ``./cnn_large_res.txt``
        with the full accuracy/loss history every epoch.
    """
    print("src_path is " + str(src_path))
    print("test_path is " + str(test_src_path))

    src_data, tgt_data = preprocess_raweeg(src_path)

    dataset = EegDataset(src_data, tgt_data)
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    is_test = 0
    test_dataset = None
    test_dataloader = None
    if test_src_path is not None and test_tgt_path is not None:
        is_test = 1
        test_src_data, test_tgt_data = preprocess_raweeg(test_src_path)
        test_dataset = EegDataset(test_src_data, test_tgt_data)
        test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"using device: {device}")

    model = Cnn2DLarge(device=device)
    model.train()
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    begin_time = time.time()
    train_iter = report_loss = 0
    test_iter = 0
    epoch = report_examples = 0
    log_every = 100
    last_acc = 0

    train_acc_list = []
    test_acc_list = []
    train_loss_list = []
    test_loss_list = []

    while True:
        epoch += 1
        train_loss_record = 0.0
        train_right = 0
        for src_d1, tgt_d in data_loader:
            train_iter += 1
            optimizer.zero_grad()
            # The model returns (predictions, loss) directly.
            pred, loss = model(src_d1, tgt_d)

            batch_loss = loss.item()
            loss.backward()

            train_right += get_right_num(pred, tgt_d)
            train_loss_record += batch_loss
            optimizer.step()

            report_loss += batch_loss
            # Fix: count the actual batch size — the last batch of an epoch
            # may be smaller than the configured batch_size, which skewed
            # the reported average loss.
            report_examples += len(tgt_d)

            if train_iter % log_every == 0:
                print('epoch %d, iter %d, avg. loss %.5f ' \
                      'time elapsed %.2f sec' % (epoch, train_iter,
                                                report_loss / report_examples,
                                                time.time() - begin_time), file=sys.stderr)
                report_loss = report_examples = 0.

        # Evaluate on the held-out set once per epoch.
        if is_test == 1:
            model.eval()
            with torch.no_grad():
                train_size = len(dataset)
                train_acc = float(train_right) / train_size
                print("训练集准确率为：%f" % train_acc)
                train_acc_list.append((epoch, train_acc))
                train_loss_list.append((epoch, train_loss_record / train_size))

                test_right = 0
                test_size = len(test_dataset)

                test_report_loss = 0
                for test_src1, test_tgt in test_dataloader:
                    test_iter += 1
                    pred, loss = model(test_src1, test_tgt)
                    test_right += get_right_num(pred, test_tgt)
                    test_report_loss += loss.item()

                test_loss_list.append((epoch, test_report_loss / test_size))

                test_acc = float(test_right) / test_size
                print("测试集准确率为：%f" % test_acc)
                test_acc_list.append((epoch, test_acc))

        # Checkpoint whenever test accuracy improves.  Fix: guard on
        # is_test — the original read test_acc unconditionally and raised
        # NameError on the first epoch when no test set was supplied.
        if is_test == 1 and last_acc < test_acc:
            torch.save(model.state_dict(), "model_dict_cnn_large_" + str(epoch) + ".pkl")
            last_acc = test_acc

        # Rewrite the full metric history each epoch so a crash loses nothing.
        with open("./cnn_large_res.txt", "w") as f:
            f.write(str({"train_acc": train_acc_list, "test_acc": test_acc_list, "train_loss": train_loss_list, "test_loss": test_loss_list}))

        # Eval above switched the model to eval mode; restore training mode.
        model.train()
        if epoch == int(epoches):
            print('reached maximum number of epochs!')
            break
    
if __name__ == "__main__":
    # Train with hard-coded data directories; the two .pkl label paths are
    # passed through but labels are actually derived inside preprocess_raweeg.
    train("/home/ubuntu/huangd_test/TrainData/", "./data/train_tgt_data_raw_b.pkl", "/home/ubuntu/huangd_test/TestData/", "./data/test_tgt_data_raw_b.pkl")