import math
import sys
import pickle
import time
from torch.utils.data import Dataset, DataLoader, TensorDataset
from model import LstmModel
import numpy as np
from typing import List, Tuple, Dict, Set, Union
from tqdm import tqdm
import torch
import torch.nn.utils
from pathlib import Path
from config import data_dir, batch_size, clip_grad, output_dir, embed_size, feature_size, hidden_size, lr, epoches, \
                src_path, tgt_path, src_path_29_30
from scipy import signal

def batch_leastsq(x, y):
    """Batched ordinary least-squares line fit.

    Fits y = k*x + b independently for each row of the inputs.

    Args:
        x: tensor of shape (batch, n) — abscissa values.
        y: tensor of shape (batch, n) — ordinate values.

    Returns:
        (k, b): tensors of shape (batch, 1) with the slope and intercept
        of each row's fitted line.
    """
    mean_x = x.mean(dim=1, keepdim=True)
    mean_y = y.mean(dim=1, keepdim=True)
    dev_x = x - mean_x
    dev_y = y - mean_y
    # Closed-form OLS: k = cov(x, y) / var(x), b = mean(y) - k * mean(x)
    slope = (dev_x * dev_y).sum(dim=1, keepdim=True) / dev_x.pow(2).sum(dim=1, keepdim=True)
    intercept = mean_y - slope * mean_x
    return slope, intercept

def extract_k_b(data, size=20):
    """Extract per-segment line-fit features (slope k, intercept b) from a signal.

    The time axis is split into `size` equal segments; for each segment a
    least-squares line is fitted per (batch, channel) series. The abscissa for
    segment i is the 1-based sample index range [i*frequency+1, (i+1)*frequency].

    Fixes over the previous version: the old code grew `torch.cat` results
    inside nested loops (quadratic copying) and relied on the deprecated
    special case of concatenating a 1-D empty tensor along dim=1. This version
    vectorizes the fit over the channel dimension and stacks segment results
    once at the end; results are numerically identical.

    Args:
        data: tensor of shape (batch, channel, time_step).
        size: number of equal-length segments to split the time axis into.

    Returns:
        (k_features, b_features): tensors of shape (batch, channel, size).
    """
    time_step = data.shape[2]
    frequency = int(time_step / size)  # samples per segment (truncates remainder)

    k_list = []
    b_list = []
    for i in range(size):
        seg = data[:, :, i * frequency:(i + 1) * frequency]  # (batch, channel, frequency)
        # 1-based absolute sample indices for this segment.
        x = torch.arange(i * frequency + 1, (i + 1) * frequency + 1, dtype=torch.float32)
        x_bar = x.mean()
        y_bar = seg.mean(dim=2, keepdim=True)  # (batch, channel, 1)
        dx = x - x_bar                         # (frequency,)
        dy = seg - y_bar                       # (batch, channel, frequency)
        # Closed-form OLS per (batch, channel) series.
        k = (dx * dy).sum(dim=2) / dx.pow(2).sum()  # (batch, channel)
        b = y_bar.squeeze(2) - k * x_bar            # (batch, channel)
        k_list.append(k)
        b_list.append(b)

    k_features = torch.stack(k_list, dim=-1)  # (batch, channel, size)
    b_features = torch.stack(b_list, dim=-1)  # (batch, channel, size)
    return k_features, b_features

class EegDataset(Dataset):
    """Dataset that preprocesses raw EEG windows on the fly.

    Each item is mean-centered, detrended, low-pass filtered, and reduced to
    per-segment line-fit features (slope/intercept) via `extract_k_b`.

    Fix: the previous `super(EegDataset).__init__()` built an *unbound* super
    object and was a silent no-op; `super().__init__()` correctly initializes
    the base Dataset.
    """

    def __init__(self, src_data, tgt_data):
        """
        Args:
            src_data: indexable collection of raw EEG windows, each of shape
                (time, channel).
            tgt_data: indexable collection of labels aligned with src_data.
        """
        super().__init__()
        self.src_data = src_data
        self.tgt_data = tgt_data
        # 6th-order Butterworth low-pass with 80 Hz cutoff.
        # NOTE(review): wn = 2*80/1000 assumes a 1000 Hz sampling rate — confirm.
        wn = 2 * 80 / 1000
        self.b, self.a = signal.butter(6, wn, "lowpass")

    def __getitem__(self, i):
        """Return (features, label) for sample i.

        Features have shape (2 * channel, n_segments): slopes concatenated
        with intercepts along the channel axis.
        """
        src, tgt = self.src_data[i], self.tgt_data[i]
        # Remove per-channel DC offset, then linear trend along time.
        src = src - torch.mean(src, dim=0)
        detrend_data = signal.detrend(src, axis=0)
        # Zero-phase low-pass filtering (filtfilt avoids phase distortion).
        filted_data = signal.filtfilt(self.b, self.a, detrend_data, axis=0)
        data = filted_data.astype(np.float32).copy()
        data = torch.tensor(data)
        data = data.unsqueeze(0)
        data = data.permute(0, 2, 1)  # (1, channel, time)
        k, b = extract_k_b(data, size=15)
        return torch.cat((k, b), dim=1).squeeze(0), tgt

    def __len__(self):
        return len(self.src_data)


def get_right_num(pred, tgt):
    """Count how many predictions match the targets.

    Replaces the previous per-element Python loop (calling `.item()` on every
    sample) with a single vectorized comparison — identical result, O(1)
    Python overhead.

    Args:
        pred: tensor of shape (batch, n_classes) — class scores/logits.
        tgt: tensor of integer labels, any shape flattening to (batch,).

    Returns:
        int: number of samples whose argmax prediction equals the target.
    """
    pred_labels = torch.argmax(pred, dim=1).view(-1)
    return int((pred_labels == tgt.view(-1)).sum().item())

def get_accuracy(pred, tgt):
    """Return the fraction of predictions matching the targets.

    Replaces the previous per-element Python loop with a vectorized
    comparison — identical result. Still raises ZeroDivisionError for empty
    `pred`, matching the original behavior.

    Args:
        pred: tensor of shape (batch, n_classes) — class scores/logits.
        tgt: tensor of integer labels, any shape flattening to (batch,).

    Returns:
        float: accuracy in [0, 1].
    """
    sum_sample = len(pred)
    pred_labels = torch.argmax(pred, dim=1).view(-1)
    right = int((pred_labels == tgt.view(-1)).sum().item())
    return float(right) / sum_sample


def train(src_path, tgt_path, test_src_path=None, test_tgt_path=None):
    """Train the LSTM classifier on EEG feature data.

    Loads pickled tensors from the given paths, trains until `epoches`
    (from config) is reached, and — when test paths are provided — evaluates
    train/test accuracy each epoch, saving the model state dict whenever the
    test accuracy improves.

    Args:
        src_path: path to the training inputs (torch-saved), e.g. "./src_data.pkl".
        tgt_path: path to the training labels (torch-saved), e.g. "./tgt_data.pkl".
        test_src_path: optional path to test inputs; evaluation only runs when
            both test paths are given.
        test_tgt_path: optional path to test labels.
    """
    src_data = torch.load(src_path)
    tgt_data = torch.load(tgt_path)

    # Evaluation is enabled only when both test paths are supplied.
    is_test = 0
    test_dataloader = None
    if test_src_path is not None and test_tgt_path is not None:
        is_test = 1
        test_src_data = torch.load(test_src_path)
        test_tgt_data = torch.load(test_tgt_path)

        # print(test_src_data.shape)
        test_dataset = EegDataset(test_src_data, test_tgt_data)
        test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

    dataset = EegDataset(src_data, tgt_data)
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"using device: {device}")

    # NOTE(review): feature_size is imported from config but 118 is hard-coded
    # here — confirm which one is intended.
    model = LstmModel(embed_size, hidden_size, feature_size=118, device=device)
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    begin_time = time.time()
    train_iter = report_loss = 0
    epoch = report_examples = 0
    log_every = 10          # print a progress line every N iterations
    last_acc = 0            # best test accuracy seen so far (checkpoint gate)

    while True:
        model.train()
        epoch += 1
        for src_d, tgt_d in data_loader:

            train_iter += 1
            optimizer.zero_grad()
            # NOTE(review): batches are not moved to `device` here; presumably
            # LstmModel handles the transfer internally (it receives `device`)
            # — confirm, otherwise this breaks on CUDA.
            loss = model(src_d, tgt_d)
            batch_loss = loss.item()
            loss.backward()
            # clip gradient to stabilize training
            grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
            optimizer.step()

            report_loss += batch_loss
            # NOTE(review): the last batch of an epoch may be smaller than
            # batch_size, so the reported avg. loss is approximate.
            report_examples += batch_size

            if train_iter % log_every == 0:
                print('epoch %d, iter %d, avg. loss %.5f ' \
                      'time elapsed %.2f sec' % (epoch, train_iter,
                                                report_loss / report_examples,
                                                time.time() - begin_time), file=sys.stderr)

                report_loss = report_examples = 0.

        ## Evaluate and checkpoint once per epoch (when a test set was given).
        if is_test == 1:
            model.eval()
            # A test set was passed in: measure train accuracy first.
            # NOTE(review): evaluation is not wrapped in torch.no_grad(), so
            # gradients are tracked needlessly; calling model(x) with a single
            # argument presumably returns class scores — confirm LstmModel API.
            train_right = 0
            train_size = len(dataset)
            for src_d, tgt_d in data_loader:
                pred = model(src_d)
                train_right += get_right_num(pred, tgt_d)
            train_acc = float(train_right) / train_size
            print("训练集准确率为：%f" % train_acc)

            test_right = 0
            test_size = len(test_dataset)
            for test_src, test_tgt in test_dataloader:
                pred = model(test_src)
                test_right += get_right_num(pred, test_tgt)
            test_acc = float(test_right) / test_size
            print("测试集准确率为：%f" % test_acc)

            # Save a checkpoint only when test accuracy improves.
            if test_acc > last_acc:
                torch.save(model.state_dict(), "model_dict_lstm" + str(epoch) + ".pkl")
                last_acc = test_acc

        if epoch == int(epoches):
            print('reached maximum number of epochs!')
            break 
            
    
if __name__ == "__main__":
    # Train on the "b" subject split and evaluate against its held-out test split.
    train(
        "./data/train_src_data_raw_375_b.pkl",
        "./data/train_tgt_data_raw_b.pkl",
        "./data/test_src_data_raw_375_b.pkl",
        "./data/test_tgt_data_raw_b.pkl",
    )