import os
from multiprocessing import cpu_count

from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler
from torch.utils.data import Dataset, Subset, DataLoader
import torchvision
from imblearn.over_sampling import SMOTE, ADASYN
# --------------------------------------------------------------------------------------------------------------------

# Batch size used when a dataset is iterated once just to collect its targets
# (see extend_loader below).
PARSE_CHUNK_SIZE = 16

# Root directory for downloaded datasets; None when the env var is unset.
DATASET_DIR = os.getenv('DATASET_DIR')


# --------------------------------------------------------------------------------------------------------------------

def extend_loader(loader):
    """Wrap a dataset constructor that takes ``split=`` (e.g. torchvision SVHN)
    so it accepts the ``train=`` boolean used by the other loaders, and ensure
    the returned dataset exposes ``targets`` and ``classes`` attributes.

    Parameters:
        loader: a dataset constructor accepting ``split='train'|'test'``.

    Returns:
        A callable with the same keyword interface as the CIFAR/MNIST loaders.
    """

    def load(**kwargs):
        # Translate the train=bool convention into split='train'/'test'.
        kwargs['split'] = 'train' if kwargs.get('train') else 'test'
        # pop with a default: callers that never passed `train` must not crash
        # (the original unconditional pop raised KeyError in that case).
        kwargs.pop('train', None)

        dataset = loader(**kwargs)

        # SVHN-style datasets store labels differently; materialize `targets`
        # and `classes` so downstream code can treat every dataset uniformly.
        if not hasattr(dataset, 'targets'):
            workers = cpu_count()

            # shuffle MUST be False: targets are collected in iteration order
            # and assigned back onto the dataset, so the order has to match the
            # dataset's own index order (shuffle=True scrambled them).
            dataset_loader = DataLoader(dataset, batch_size=PARSE_CHUNK_SIZE, shuffle=False,
                                        num_workers=workers, collate_fn=None)
            targets = []

            pbar = tqdm(dataset_loader)
            for _, target in pbar:
                targets.extend(target)
                pbar.set_description('parse targets')

            dataset.targets = targets
            dataset.classes = np.unique(targets)

        return dataset

    return load


# --------------------------------------------------------------------------------------------------------------------


# Dataset name -> constructor. CIFAR10 and MNIST natively accept `train=`;
# SVHN takes `split=` instead, so it is wrapped with extend_loader to translate
# the keyword and to materialize `targets`/`classes`.
DATASET_LOADER = {'cifar10': torchvision.datasets.CIFAR10,
                  'mnist': torchvision.datasets.MNIST,
                  'svhn': extend_loader(torchvision.datasets.SVHN)}

# --------------------------------------------------------------------------------------------------------------------

# Shared transform: PIL image -> float tensor scaled to [0, 1].
pil_to_tensor = torchvision.transforms.ToTensor()


# --------------------------------------------------------------------------------------------------------------------


class DatasetResampler(Dataset):
    """Dataset wrapper that can re-sample individual classes.

    `src` is any indexable dataset (or array) and `targets` the per-example
    labels aligned with it. `resample()` redraws the index set of selected
    classes so they can be grown or shrunk; `__getitem__` then serves the
    re-sampled view.

    NOTE(review): when `src` is a torchvision dataset, ``self.subset[index]``
    already yields an ``(input, label)`` pair, so ``__getitem__`` returns
    ``((input, label), target)`` — confirm callers expect this nesting.
    """

    def __init__(self, src, targets, classes, seed=None):
        self.src = src
        self.subset = src

        self.targets = targets
        self.classes = classes

        # Immutable copy of the original labels. Group indices always refer to
        # positions in `src`, so every resample step must look labels up here
        # rather than in the (possibly already re-indexed) `self.targets`.
        self._base_targets = np.asarray(targets)

        self.groups = None      # target -> indices into `src`
        self.frequency = None   # target -> group size at last reset_weights()
        self.weights = None     # target -> balanced class weight

        self.random = np.random.RandomState(seed=seed)

        self.reset()
        self.reset_weights()

    def reset(self):
        """Recompute the per-class index groups from the original labels."""
        self.groups = {target: self.cluster_set(target)
                       for target in np.unique(self._base_targets)}

    def reset_weights(self):
        """Recompute class frequencies and weights (num_classes * freq / n)."""
        num_classes = len(self.classes)
        num_examples = len(self.subset)

        self.frequency = {key: len(indices) for key, indices in self.groups.items()}
        self.weights = {key: (num_classes * self.frequency[key]) / num_examples
                        for key in self.groups}

    def cluster_set(self, target):
        """Return the indices (into `src`) of all examples labelled `target`."""
        return np.where(self._base_targets == target)[0]

    def get_resample_frequency(self, target, weight):
        """Return the new sample count for `target`.

        NOTE(review): the weight-based formula is commented out and replaced by
        a hard-coded 5x blow-up, so `weight` is currently ignored here (it still
        scales `self.weights` in resample_step) — confirm this is intentional.
        """
        old_frequency = self.frequency[target]
        # new_frequency = (weight / self.weights[target]) * old_frequency
        new_frequency = old_frequency * 5
        return int(np.round(new_frequency))

    def sample_indices(self):
        """Return the flat list of all currently selected indices into `src`."""
        indices = []
        for group in self.groups.values():
            indices.extend(group)
        return indices

    def resample_step(self, target, weight):
        """Redraw the index group for one class and rebuild the views."""
        count = self.get_resample_frequency(target, weight)
        # Sample with replacement only when more examples are needed than exist.
        replace = count > self.frequency[target]

        self.groups[target] = self.random.choice(self.groups[target], size=count, replace=replace)

        indices = self.sample_indices()  # computed once (was computed twice)
        self.subset = Subset(self.src, indices)
        # Index the ORIGINAL labels: the previous implementation re-subset the
        # already re-indexed `self.targets`, so a second resample_step
        # misaligned inputs and labels.
        self.targets = self._base_targets[indices]
        self.weights[target] *= weight

    def resample(self, resample_dict):
        """Apply a {target: weight} mapping step by step; None is a no-op.

        Returns self so the call can be chained.
        """
        if resample_dict is None:
            return self

        for target, weight in resample_dict.items():
            self.resample_step(target, weight)

        return self

    def __getitem__(self, index):
        inputs = self.subset[index]
        targets = self.targets[index]
        return inputs, targets

    def __len__(self):
        return len(self.subset)


# --------------------------------------------------------------------------------------------------------------------

def get_splits(name, download=False, resample_dict=None, use_valid=True, valid_size=0.1, seed=None):
    """Build DatasetResampler splits for one of the torchvision datasets.

    Parameters:
        name: key into DATASET_LOADER ('cifar10', 'mnist', 'svhn').
        download: forwarded to the torchvision dataset constructor.
        resample_dict: optional {target: weight} mapping applied to the train split.
        use_valid: carve a validation split out of the training set.
        valid_size: fraction in (0, 1) of the training set used for validation.
        seed: seeds both the validation-split shuffle and the resamplers.

    Returns:
        dict with 'train', 'test' and (when use_valid) 'valid' DatasetResampler.
    """
    assert 0 < valid_size < 1, 'valid_size must be in range (0, 1)'

    if DATASET_DIR is None:
        # Fail with a clear message instead of the cryptic TypeError that
        # `None + '/' + name` used to raise when the env var was unset.
        raise RuntimeError('DATASET_DIR environment variable is not set')

    root = os.path.join(DATASET_DIR, name)
    loader = DATASET_LOADER[name]

    train_dataset = loader(root=root, train=True, transform=pil_to_tensor, target_transform=None, download=download)
    test_dataset = loader(root=root, train=False, transform=pil_to_tensor, target_transform=None, download=download)

    train_targets = np.array(train_dataset.targets)
    test_targets = np.array(test_dataset.targets)

    # Union of class labels across both splits.
    classes = np.array(list(set(train_dataset.classes) | set(test_dataset.classes)))

    splits = {}

    if use_valid:
        random = np.random.RandomState(seed=seed)

        n = len(train_dataset)
        # NOTE(review): this is 0 when valid_size * n < 1 — assumes datasets
        # are large enough that the validation split is non-empty.
        sample_size = int(valid_size * n)

        indices = np.arange(n)
        random.shuffle(indices)

        # The last `sample_size` shuffled indices become the validation set.
        train_indices, valid_indices = indices[:-sample_size], indices[-sample_size:]

        train_targets, valid_targets = train_targets[train_indices], train_targets[valid_indices]
        train_dataset, valid_dataset = Subset(train_dataset, train_indices), Subset(train_dataset, valid_indices)

        splits['valid'] = DatasetResampler(valid_dataset, targets=valid_targets, classes=classes, seed=seed)

    # Common to both branches (the original duplicated these two lines).
    splits['train'] = DatasetResampler(train_dataset, targets=train_targets, classes=classes, seed=seed)
    splits['test'] = DatasetResampler(test_dataset, targets=test_targets, classes=classes, seed=seed)

    splits['train'] = splits['train'].resample(resample_dict)

    return splits


def get_splits_v2(path, resample_dict=None, rolling_window_size=10, seed=None):
    """Load the credit-card CSV at `path` and build rolling-window splits.

    Each example is a window of `rolling_window_size` consecutive rows,
    labelled with the 'Class' value of the first row after the window.

    Parameters:
        path: path to a CSV with 'Time', 'Amount' and 'Class' columns.
        resample_dict: currently unused (resampling call is commented out).
        rolling_window_size: rows per window.
        seed: random_state for the train/test splits and the resamplers.

    Returns:
        (splits, size): dict of 'train'/'valid'/'test' DatasetResampler and
        the number of features per time step.
    """
    data = pd.read_csv(path)
    data = data.drop("Time", axis=1)

    # NOTE(review): the scaler is fitted on the FULL dataset before the
    # train/test split — this leaks test statistics into training; confirm
    # whether that is intended before changing it.
    scaler = StandardScaler()
    data['std_Amount'] = scaler.fit_transform(data['Amount'].values.reshape(-1, 1))
    data = data.drop("Amount", axis=1)

    feature_cols = [c for c in data.columns.tolist() if c != "Class"]
    dataset = data[feature_cols]
    targets = data["Class"]

    num_windows = dataset.shape[0] - rolling_window_size
    size = dataset.shape[1]

    # Stack overlapping windows; window i covers rows [i, i + window) and is
    # labelled with the row right after it.
    stacked = np.zeros([num_windows * rolling_window_size, size])
    window_targets = []
    for i in range(num_windows):
        beg = i
        end = beg + rolling_window_size
        # dtype=float, not np.float: the alias was removed in NumPy >= 1.24.
        stacked[(rolling_window_size * i):(rolling_window_size * (i + 1)), :] = \
            np.array(dataset[beg:end], dtype=float)
        window_targets.append(targets[end])
    targets = np.array(window_targets)

    dataset = stacked.reshape(num_windows, rolling_window_size, size).astype(np.float32)

    classes = np.array(list(set(targets)))

    splits = {}
    train_dataset, test_dataset, train_targets, test_targets = train_test_split(
        dataset, targets, test_size=0.2, random_state=seed)
    train_dataset, valid_dataset, train_targets, valid_targets = train_test_split(
        train_dataset, train_targets, test_size=0.2, random_state=seed)
    # train_dataset, train_targets = SMOTE(random_state=seed, sampling_strategy=0.1).fit_resample(train_dataset, train_targets)
    splits['train'] = DatasetResampler(train_dataset, targets=train_targets, classes=classes, seed=seed)
    splits['valid'] = DatasetResampler(valid_dataset, targets=valid_targets, classes=classes, seed=seed)
    splits['test'] = DatasetResampler(test_dataset, targets=test_targets, classes=classes, seed=seed)

    # splits['train'] = splits['train'].resample(resample_dict)

    return splits, size


# --------------------------------------------------------------------------------------------------------------------
def _rolling_windows(features, labels, window_size):
    """Stack overlapping row windows of `features` into a 3-D float32 array.

    Window i covers `window_size` consecutive rows starting at row i and is
    labelled with the label of the first row after it. Returns
    (windows, window_labels) where windows has shape
    (num_windows, window_size, num_features).
    """
    num_windows = features.shape[0] - window_size
    num_features = features.shape[1]

    flat = np.zeros([num_windows * window_size, num_features])
    window_labels = []
    for i in range(num_windows):
        # dtype=float, not np.float: the alias was removed in NumPy >= 1.24.
        flat[(window_size * i):(window_size * (i + 1)), :] = \
            np.array(features[i:i + window_size], dtype=float)
        window_labels.append(labels[i + window_size])

    windows = flat.reshape(num_windows, window_size, num_features).astype(np.float32)
    return windows, np.array(window_labels)


def get_splits_v1(path, resample_dict=None, rolling_window_size=10, seed=None):
    """Load the transactions JSON-lines file at `path` and build
    train/valid/test rolling-window splits.

    Parameters:
        path: path to a JSON-lines file with the transaction schema below.
        resample_dict: currently unused (resampling call is commented out).
        rolling_window_size: rows per window.
        seed: seeds the DatasetResampler instances.

    Returns:
        (splits, size): dict of 'train'/'valid'/'test' DatasetResampler and
        the number of features per time step.
    """
    data = pd.read_json(path, lines=True)
    data.drop(['merchantCity', 'merchantState', 'merchantZip', 'echoBuffer', 'posOnPremises', 'recurringAuthInd'],
              axis=1, inplace=True)

    # Label-encode the categorical columns (a fresh fit per column).
    le = LabelEncoder()
    categorical = ['merchantName', 'acqCountry', 'merchantCountryCode', 'posEntryMode', 'posConditionCode',
                   'merchantCategoryCode', 'transactionType', 'cardPresent', 'expirationDateKeyInMatch', 'isFraud']
    for col in categorical:
        data[col] = le.fit_transform(data[col])

    # Expand the datetime columns into numeric parts, then drop the originals.
    datetime_cols = ['transactionDateTime', 'currentExpDate', 'accountOpenDate', 'dateOfLastAddressChange']
    for col in datetime_cols:
        data[col] = pd.to_datetime(data[col])

    for part in ['year', 'month', 'day', 'hour', 'minute', 'second']:
        data['transactionDateTime_' + part] = getattr(data['transactionDateTime'].dt, part)

    for col in ['currentExpDate', 'accountOpenDate', 'dateOfLastAddressChange']:
        for part in ['year', 'month', 'day']:
            data[col + '_' + part] = getattr(data[col].dt, part)

    data.drop(datetime_cols, axis=1, inplace=True)
    data.drop(['enteredCVV', 'customerId', 'availableMoney'], axis=1, inplace=True)
    # Preprocessing is complete at this point.

    dataset = np.array(data.drop('isFraud', axis=1))
    targets = np.array(data.isFraud)

    # 70/10/20 chronological split (rows are not shuffled).
    cuts = [int(len(dataset) * 0.7), int(len(dataset) * 0.8)]
    train_dataset, valid_dataset, test_dataset = np.split(dataset, cuts)
    train_targets, valid_targets, test_targets = np.split(targets, cuts)

    classes = np.array(list(set(train_targets) | set(valid_targets) | set(test_targets)))

    # Normalize all variables to [0, 1]; fit on training data only.
    scaler = MinMaxScaler()
    train_dataset = scaler.fit_transform(train_dataset)
    valid_dataset = scaler.transform(valid_dataset)
    test_dataset = scaler.transform(test_dataset)

    size = valid_dataset.shape[1]

    # The original repeated this stacking loop three times verbatim.
    train_dataset, train_targets = _rolling_windows(train_dataset, train_targets, rolling_window_size)
    valid_dataset, valid_targets = _rolling_windows(valid_dataset, valid_targets, rolling_window_size)
    test_dataset, test_targets = _rolling_windows(test_dataset, test_targets, rolling_window_size)

    splits = {}
    splits['train'] = DatasetResampler(train_dataset, targets=train_targets, classes=classes, seed=seed)
    splits['valid'] = DatasetResampler(valid_dataset, targets=valid_targets, classes=classes, seed=seed)
    splits['test'] = DatasetResampler(test_dataset, targets=test_targets, classes=classes, seed=seed)
    # splits['train'] = splits['train'].resample(resample_dict)
    return splits, size


# --------------------------------------------------------------------------------------------------------------------
# noinspection PyArgumentList
def get_imbalanced_weights(unique_targets, minority_count=1, min_weight=0.001, max_weight=0.01, seed=0):
    """Draw random resampling weights for the minority class(es).

    Parameters:
        unique_targets: sorted array of class labels; the minority (positive)
            classes are taken to start at index 1.
        minority_count: how many classes, starting at index 1, to weight.
        min_weight, max_weight: weights are drawn uniformly from
            [min_weight, max_weight).
        seed: seeds the weight draw for reproducibility.

    Returns:
        {target: weight} dict with `minority_count` entries.
    """
    random = np.random.RandomState(seed=seed)

    # The minority class is the positive label at index 1. Slicing (instead of
    # the original fixed `unique_targets[[1]]`) honors minority_count > 1,
    # which was previously silently ignored via zip truncation.
    minority_set = unique_targets[1:1 + minority_count]

    weights = random.rand(minority_count)
    weights = weights * (max_weight - min_weight) + min_weight
    resample_dict = dict(zip(minority_set, weights))

    return resample_dict

# --------------------------------------------------------------------------------------------------------------------
