import os
import time
from typing import List, Tuple

import kmeans_pytorch
import numpy
import pandas
import torch
import torch.utils.data as tud
from sklearn.metrics import roc_curve
from torch import nn
from tqdm import tqdm

from dataset_multi import TimeDataset
from evaluate_method import tsad_score
from gdn_util.net_struct import get_feature_map, get_fc_graph_struc
from gdn_util.preprocess import build_loc_net


# Restrict CUDA to the first GPU; fall back to CPU when CUDA is unavailable.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Label convention used throughout this module: 0 = normal sample, 1 = attack/anomaly.
normal = 0
attack = 1

# INPUT_SIZE is used when creating datasets: the sampled time length of one batch of data.
INPUT_SIZE = 30
# Sliding-window size for the datasets (NOTE(review): callers below pass window_size=1
# directly rather than this constant — confirm they are meant to stay in sync).
WINDOW_SIZE = 1

# Default number of training epochs.
EPOCHS = 10


def get_edge_index(ds_path):
    """
    Build the fully-connected graph edge index for one dataset (GDN-style).

    :param ds_path: dataset root directory; must contain a ``train/`` sub-directory
        with at least one CSV file
    :return: tuple of (edge index tensor of dtype long on ``device``, feature map)
    """
    dataset = ds_path
    feature_map = get_feature_map(dataset)
    fc_struc = get_fc_graph_struc(dataset)

    # fix: os.listdir order is filesystem-dependent; sort so that the CSV used
    # to derive the columns is deterministic across runs/machines
    csv_file = sorted(os.listdir(f'{dataset}/train'))

    train = pandas.read_csv(
        f'{dataset}/train/{csv_file[0]}', sep=',', index_col=0)
    # the label column is not a sensor feature — drop it before building the graph
    if 'Normal/Attack' in train.columns:
        train = train.drop(columns=['Normal/Attack'])

    fc_edge_index = build_loc_net(fc_struc, list(
        train.columns), feature_map=feature_map)
    fc_edge_index = torch.tensor(fc_edge_index, dtype=torch.long).to(device)
    return fc_edge_index, feature_map


def create_dataloader(ds_path,
                      timemode,
                      reverse_label,
                      batch_size,
                      input_size=INPUT_SIZE,
                      window_size=1) -> Tuple[tud.DataLoader, tud.DataLoader]:
    """
    Build the pair of dataloaders (normal/train and attack/test) for one dataset.

    :param ds_path: dataset root; ``train/`` and ``test/`` sub-directories are expected
    :param timemode: True when the dataset CSVs carry a timestamp column
    :param reverse_label: True when the dataset encodes labels opposite to our
        convention (0 = normal, 1 = attack) and they must be flipped
    :param batch_size: batch size for both loaders
    :param input_size: input size handed to TimeDataset
    :param window_size: window size handed to TimeDataset
    :return: (normal dataloader, attack dataloader)
    """
    train_set = TimeDataset(path=f'{ds_path}/train/', mode='train', timemode=timemode, label=False,
                            input_size=input_size, windowsize=window_size)
    test_set = TimeDataset(path=f'{ds_path}/test/', mode='test', timemode=timemode, label=True, test_size=1,
                           input_size=input_size, windowsize=window_size, reverse_label=reverse_label)

    # training data is shuffled; test data keeps its temporal order
    train_loader = tud.DataLoader(
        dataset=train_set, batch_size=batch_size, shuffle=True)
    test_loader = tud.DataLoader(
        dataset=test_set, batch_size=batch_size, shuffle=False)

    return train_loader, test_loader


def create_all_dataloader(data_directory,
                          window_size=1,
                          batch_size=64,
                          input_size=INPUT_SIZE):
    """
    Create the dataloaders for all the datasets in the data directory.

    :param data_directory: data directory path
    :param window_size: window size for the dataset
    :param batch_size: batch size for training and testing
    :param input_size: input_size for the dataset
    :return: list of dicts, each with keys 'dataset_name', 'normal' and 'attack'
    """
    dataset_dirs = os.listdir(data_directory)
    # fix: the old type comment `List[dict[str: tud.dl]]` was invalid syntax and
    # referenced a nonexistent `tud.dl`
    all_dataloader: List[dict] = []
    for dataset_dir in dataset_dirs:
        print(f'start building dataloader {dataset_dir}')
        if dataset_dir in ['SWAT', 'WADI']:
            # SWAT and WADI datasets both don't have sub datasets
            # SWAT and WADI datasets both have timestamp column, and their labels need to reverse
            normal_dl, attack_dl = create_dataloader(ds_path=os.path.join(data_directory, dataset_dir),
                                                     batch_size=batch_size,
                                                     timemode=True,
                                                     reverse_label=True,
                                                     input_size=input_size,
                                                     window_size=window_size)
            all_dataloader.append({
                'dataset_name': dataset_dir,
                'normal': normal_dl,
                'attack': attack_dl,
            })
        else:
            for sub_dataset_dir in os.listdir(os.path.join(data_directory, dataset_dir)):
                if not os.path.isdir(os.path.join(data_directory, dataset_dir, sub_dataset_dir)):
                    continue
                # Now we will create the dataloaders of each sub dataset of MSL, SMAP and SMD
                # Note: These three datasets all don't have timestamp column, and the normal and anomaly labels don't
                # need to reverse.
                normal_dl, attack_dl = create_dataloader(
                    ds_path=os.path.join(
                        data_directory, dataset_dir, sub_dataset_dir),
                    timemode=False,
                    reverse_label=False,
                    input_size=input_size,
                    window_size=window_size,
                    batch_size=batch_size)
                all_dataloader.append({
                    'dataset_name': f'{dataset_dir}/{sub_dataset_dir}',
                    'normal': normal_dl,
                    'attack': attack_dl,
                })
        # fix: previously inside the else branch, so SWAT/WADI never reported completion
        print(f'finish building dataloader {dataset_dir}\n'
              '---------------------------')
    return all_dataloader


class AnomalyConfusionMatrix:
    """
    Accumulates per-batch anomaly scores and labels, binarizes the scores
    (via ROC threshold or k-means), and exposes the usual binary metrics.

    Internal layout is ``self._cm[pred][true]`` with normal == 0, attack == 1:
    _cm[1][1] = TP, _cm[1][0] = FP, _cm[0][1] = FN, _cm[0][0] = TN.
    """

    def __init__(self, tp=0, tn=0, fn=0, fp=0):
        # fix: the constructor previously stored [[tp, fp], [fn, tn]], which put
        # TP in the _cm[normal][normal] slot — inconsistent with how
        # roc_threshold()/kmeans() accumulate counts (_cm[pred][true]) and with
        # how the metric properties read the matrix (_cm[attack][attack] as TP).
        self._cm = [[tn, fn], [fp, tp]]
        # total detection time in seconds; set by the caller (see test_model)
        self.time = 0
        # accumulated scores (numpy array) and labels (torch tensor until
        # thresholding converts it to numpy)
        self._total_y_pred = None
        self._total_y_true = None

    def add_data(self, y_pred: torch.Tensor, y_true: torch.Tensor):
        """
        add y_true and y_pred data to AnomalyConfusionMatrix
        :param y_true: true data
        :param y_pred: prediction data (anomaly scores)
        :raises ArithmeticError: if y_pred and y_true have different lengths
        """
        # We should transfer the y_pred to numpy array where reason is in the next comment
        y_pred = y_pred.view(-1, y_pred.shape[-1]).cpu().detach().numpy()
        y_true = y_true.view(-1, y_true.shape[-1])

        # If y_true and y_pred's shape are not the same
        if y_pred.shape[0] != y_true.shape[0]:
            raise ArithmeticError(f'y_pred and y_true are not in the same shape. '
                                  f'y_pred: {y_pred.shape[0]}, y_true: {y_true.shape[0]}')

        if self._total_y_pred is None and self._total_y_true is None:
            self._total_y_pred = y_pred
            self._total_y_true = y_true
            return

        # concat the self._total_y_pred in the numpy way not in the torch way,
        # so that the GPU memory won't burst out
        self._total_y_pred = numpy.concatenate(
            (self._total_y_pred, y_pred), axis=0)
        self._total_y_true = torch.concat([self._total_y_true, y_true], dim=0)

    def roc_threshold(self):
        """
        Pick the score threshold that maximizes Youden's J (tpr - fpr) on the
        ROC curve, binarize the accumulated scores with it, and fill _cm.
        """
        self._total_y_true = self._total_y_true.cpu().detach().numpy()
        # NaN scores are treated as the lowest (most normal) score
        self._total_y_pred[numpy.isnan(self._total_y_pred)] = 0
        fpr, tpr, thresholds = roc_curve(
            self._total_y_true, self._total_y_pred, pos_label=attack)
        ks = (tpr - fpr).argmax()
        threshold = thresholds[ks]
        print(f"threshold: {threshold}")
        self._total_y_pred = numpy.where(
            self._total_y_pred > threshold, attack, normal)
        for i in range(self._total_y_pred.shape[0]):
            self._cm[int(self._total_y_pred[i])][int(
                self._total_y_true[i])] += 1

    def kmeans(self):
        """
        Binarize the accumulated scores by clustering them into 2 groups with
        k-means (on `device`), then fill _cm. The smaller cluster is assumed to
        be the attack class.
        """
        self._total_y_pred = torch.from_numpy(self._total_y_pred).to(device)
        self._total_y_pred, centroids = kmeans_pytorch.kmeans(X=self._total_y_pred,
                                                              num_clusters=2,
                                                              distance='euclidean',
                                                              tol=1e-5,
                                                              device=torch.device(device))
        # convert torch.Tensor to numpy from GPU(or CPU) to CPU
        self._total_y_pred = self._total_y_pred.cpu().detach().numpy()
        # don't ask me why here we need to use another variable total_y_pred to get the data
        # I don't know. It works!
        # Do not refactor code here!!!
        total_y_pred = self._total_y_pred[:, numpy.newaxis]
        self._total_y_true = self._total_y_true.cpu().detach().numpy()
        # We assume that num of attack should be less than num of normal
        if numpy.count_nonzero(total_y_pred == attack) > len(total_y_pred) / 2:
            # if num of attack is more than num of normal, flip cluster ids (1->0, 0->1)
            total_y_pred = (total_y_pred - 1) ** 2
        for i in range(total_y_pred.shape[0]):
            self._cm[int(total_y_pred[i])][int(self._total_y_true[i])] += 1

    def to_dataframe(self):
        """Return the accumulated (true, pred) pairs as a two-column DataFrame."""
        true_pred_dict = {
            'true': self._total_y_true.squeeze().tolist(),
            'pred': self._total_y_pred.squeeze().tolist()
        }

        true_pred_df = pandas.DataFrame(true_pred_dict)
        return true_pred_df

    @property
    def precision(self) -> float:
        # TP / (TP + FP); 0 when no positive predictions were made
        try:
            return self._cm[attack][attack] / (self._cm[attack][attack] + self._cm[attack][normal])
        except ZeroDivisionError:
            return 0

    @property
    def accuracy(self) -> float:
        # (TP + TN) / total; 0 when the matrix is empty
        try:
            return (self._cm[attack][attack] + self._cm[normal][normal]) / \
                   (self._cm[attack][attack] + self._cm[attack][normal] +
                    self._cm[normal][attack] + self._cm[normal][normal])
        except ZeroDivisionError:
            return 0

    @property
    def recall(self) -> float:
        # TP / (TP + FN); 0 when there are no true positives in the data
        try:
            return self._cm[attack][attack] / (self._cm[attack][attack] + self._cm[normal][attack])
        except ZeroDivisionError:
            return 0

    @property
    def f1_score(self) -> float:
        # harmonic mean of precision and recall; 0 when both are 0
        try:
            return 2 * self.precision * self.recall / (self.precision + self.recall)
        except ZeroDivisionError:
            return 0

    @property
    def tsad_score(self) -> float:
        # time-series anomaly-detection score from evaluate_method.tsad_score;
        # degraded to 0 on runtime/zero-division failures
        try:
            return tsad_score(y_true=self._total_y_true, y_pred=self._total_y_pred, pos_label=attack)
        except RuntimeError:
            print('tsad score runtime error')
            return 0
        except ZeroDivisionError:
            return 0

    @property
    def total_samples(self) -> int:
        # sum of all four confusion-matrix cells
        return self._cm[normal][normal] + self._cm[normal][attack] + self._cm[attack][normal] + self._cm[attack][attack]

    def __str__(self):
        return f'f1 score: {self.f1_score:.3f} ' \
               f'tsad score: {self.tsad_score:.3f} ' \
               f'precision: {self.precision:.3f} ' \
               f'recall: {self.recall:.3f} ' \
               f'detection time: {self.time:.2f}s\n'


def train_model(epochs: int,
                dataloader: tud.DataLoader,
                dataset: str,
                data_dir: str,
                is_recur: bool,
                continue_train: bool,
                compute_loss,
                build_model):
    """
    Train a model by using default training process
    :param epochs: epochs to train a model
    :param dataloader: dataloader for the model
    :param dataset: dataset name
    :param data_dir: directory containing the datasets
    :param is_recur: whether the model is recurrent or not. will affect the input_size of the model
    :param continue_train: whether the model should continue training even a pth file is trained
    :param compute_loss: loss function to compute loss and backward
    :param build_model: build_model function to build a model with input_size
    :return: the trained (or loaded) model
    """
    ds_path = os.path.join(data_dir, dataset)
    if is_recur:
        model, optimizer = build_model(
            input_size=dataloader.dataset.feature_size, ds_path=ds_path)
    else:
        # TODO: will change the data_demo to data in the end
        model, optimizer = build_model(
            input_size=dataloader.dataset.time_feature, ds_path=ds_path)

    # the dataset name is reused to build the pth file name; sub-dataset paths
    # like 'SMD/machine-1-1' become 'SMD_machine-1-1'
    dataset = dataset.replace('/', '_')
    os.makedirs('./model', exist_ok=True)
    try:
        # keep the try minimal: only the load can raise here
        model.load_state_dict(torch.load(
            f'./model/{model.name}_for_dataset_{dataset}.pth'))
        print(f'load previous {model.name} successfully')
        if not continue_train:
            print(f'{continue_train} continue_train, stop training model.')
            return model
    except FileNotFoundError:
        print(f'{model.name} pth file not found, start to train a new model')
    except RuntimeError:
        print(
            f'{model.name} pth file does not match the model size, start to train a new model')
    # brief pause so the status message is readable before tqdm takes the console
    time.sleep(1)

    for epoch in range(epochs):
        avg_loss = []
        with tqdm(total=len(dataloader), ncols=100) as _tqdm:
            _tqdm.set_description(f'epoch:{epoch + 1}/{epochs}')
            for batch_num, data in enumerate(dataloader):
                x = data[0].to(device)
                optimizer.zero_grad()

                # using compute_loss func to compute loss of every model
                loss = compute_loss(
                    model=model, x=x, y=data[2].to(device), epoch=epoch)
                loss.backward()
                avg_loss.append(loss.item())

                optimizer.step()

                # calculate real-time loss and show it on the screen
                _tqdm.set_postfix(loss='{:.6f}'.format(
                    sum(avg_loss) / len(avg_loss)))
                _tqdm.update(1)

    torch.save(model.state_dict(),
               f'./model/{model.name}_for_dataset_{dataset}.pth')
    # fix: the early-exit path returned the model but the training path
    # previously fell through and returned None
    return model


def default_y_pred_func(model: nn.Module, data: torch.Tensor, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Default scoring function for test_model: the anomaly score of each sample is
    the pairwise distance between the model's reconstruction and its input.

    :param model: model whose forward returns (_, reconstruction)
    :param data: batch where data[0] is the input and data[1] the integer labels
    :return: (scores of shape (batch, 1), labels on `device`)
    """
    inputs = data[0].to(device)
    labels = data[1].int().to(device)

    _, reconstruction = model(inputs)

    # flatten both tensors to (batch, features) before computing the distance
    flat_out = reconstruction.contiguous().view(reconstruction.shape[0], -1)
    flat_in = inputs.contiguous().view(inputs.shape[0], -1)
    scores = torch.pairwise_distance(flat_out, flat_in)
    return torch.unsqueeze(scores, dim=-1), labels


def test_model(dataloader: tud.DataLoader,
               dataset: str,
               data_dir: str,
               is_recur: bool,
               build_model,
               pred_func) -> AnomalyConfusionMatrix:
    """
    Evaluate a previously trained model on the attack dataloader.

    :param dataloader: attack (test) dataloader
    :param dataset: dataset name (also part of the pth file name)
    :param data_dir: directory containing the datasets
    :param is_recur: whether the model is recurrent; selects the input_size source
    :param build_model: func(input_size, ds_path) -> (model, optimizer)
    :param pred_func: func(model, data) -> (y_pred, y_true) scoring function
    :return: AnomalyConfusionMatrix with accumulated results (empty if no pth file)
    """
    print(f'start testing with dataset {dataset}')
    ds_path = os.path.join(data_dir, dataset)
    if is_recur:
        model, _ = build_model(
            input_size=dataloader.dataset.feature_size, ds_path=ds_path)
    else:
        model, _ = build_model(
            input_size=dataloader.dataset.time_feature, ds_path=ds_path)
    try:
        dataset = dataset.replace('/', '_')
        model.load_state_dict(torch.load(
            f'./model/{model.name}_for_dataset_{dataset}.pth'))
    except FileNotFoundError:
        print(
            f'{model.name} model not built, please run train_{model.name}_model func first')
        return AnomalyConfusionMatrix()
    time.sleep(1)
    confusion_matrix = AnomalyConfusionMatrix()

    # calculate total detection time
    # fix: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for wall-clock interval measurement
    t1 = time.perf_counter()
    with tqdm(total=len(dataloader), ncols=100) as _tqdm:
        with torch.no_grad():
            for _, data in enumerate(dataloader):
                # use a prediction function to create the y_pred for comparing with y_true
                y_pred, y_true = pred_func(model=model, data=data)

                confusion_matrix.add_data(y_pred=y_pred, y_true=y_true)

                _tqdm.update(1)
    t2 = time.perf_counter()

    confusion_matrix.roc_threshold()
    confusion_matrix.time = t2 - t1
    print(confusion_matrix)
    return confusion_matrix


def train_and_test_model(model_name: str,
                         compute_loss,
                         build_model,
                         normal_dl: tud.DataLoader,
                         attack_dl: tud.DataLoader,
                         epochs: int,
                         is_recur: bool,
                         dataset: str,
                         data_dir: str,
                         continue_train: bool,
                         test_pred_func=default_y_pred_func) -> AnomalyConfusionMatrix:
    """
    Run the full train-then-test pipeline for one model on one dataset.

    :param model_name: display name of the model (used only for log messages)
    :param compute_loss: func(model, x, y) -> nn.Loss computing one batch's loss
    :param build_model: func(input_size, ds_path) -> nn.Module creating the model
    :param normal_dl: dataloader with normal (training) data
    :param attack_dl: dataloader with attack (test) data
    :param epochs: number of training epochs
    :param is_recur: whether the model is recurrent (RNN/LSTM/GRU)
    :param dataset: dataset name
    :param data_dir: directory containing the datasets
    :param continue_train: whether to keep training even when a pth file exists
    :param test_pred_func: func(model, data) -> (y_pred, y_true) used during testing
    :return: confusion matrix filled with the test results
    """
    print(f'start training {model_name} for dataset {dataset}')
    train_model(epochs=epochs,
                dataloader=normal_dl,
                dataset=dataset,
                data_dir=data_dir,
                is_recur=is_recur,
                continue_train=continue_train,
                compute_loss=compute_loss,
                build_model=build_model)
    print(f'finish training {model_name} for dataset {dataset}\n'
          '-----------------------------------')

    print(f'start testing {model_name} for dataset {dataset}')

    result_matrix = test_model(dataloader=attack_dl,
                               dataset=dataset,
                               data_dir=data_dir,
                               is_recur=is_recur,
                               build_model=build_model,
                               pred_func=test_pred_func)
    print(f'finish testing {model_name} for dataset {dataset}\n'
          '-----------------------------')

    return result_matrix
