from sklearn.model_selection import train_test_split
from torch.utils.data import (TensorDataset, DataLoader,
                              SequentialSampler, WeightedRandomSampler)
import torch
from torch.autograd import Variable
from tqdm import tqdm
import numpy as np
import pandas as pd
from common.configs.path import paths
from common.configs.tools import label_map, load_json


def text_prepare(text_gram, Y):
    """Split the combined corpus into train and test portions.

    The first ``len(Y)`` entries of ``text_gram`` are the labelled
    training texts; everything after that is the unlabelled test set.

    Args:
        text_gram: full corpus (train texts followed by test texts).
        Y: training labels, one per training text.

    Returns:
        (train_texts, test_texts, labels) as numpy arrays.
    """
    labels = np.array(Y)
    corpus = np.array(text_gram, dtype=object)
    n_train = labels.shape[0]
    return corpus[:n_train], corpus[n_train:], labels


def word_idx(texts):
    """Build a token-to-index vocabulary from the corpus.

    Args:
        texts (List[List[str]]): Tokenized sentences.

    Returns:
        Dict[str, int]: token -> index mapping. Index 0 is reserved for
        '<pad>' and index 1 for '<unk>'; corpus tokens are assigned
        consecutive indexes starting at 2, in first-seen order.
    """
    # Reserve the special tokens up front; corpus tokens start at 2.
    word2idx = {'<pad>': 0, '<unk>': 1}

    for sent in texts:
        for token in sent:
            if token not in word2idx:
                # len(word2idx) is always the next free index.
                word2idx[token] = len(word2idx)

    return word2idx


def encode(tokenized_texts, word2idx, flip, max_len):
    """Pad/truncate each sentence to ``max_len`` and map tokens to ids.

    Bug fixes versus the original:
      * out-of-vocabulary tokens now map to the '<unk>' index instead of
        ``None`` (``dict.get`` with no default produced ``None`` entries,
        yielding an object array unusable as model input);
      * callers' token lists are no longer mutated in place by ``+=``.

    Args:
        tokenized_texts: iterable of token lists.
        word2idx (Dict[str, int]): vocabulary containing '<pad>' and '<unk>'.
        flip (bool): when True, append two '<pad>' tokens plus the reversed
            padded sentence, giving sequences of length ``2 * max_len + 2``.
        max_len (int): target sentence length before the optional flip.

    Returns:
        np.ndarray: token ids with shape (N, max_len), or
            (N, 2 * max_len + 2) when ``flip`` is True.
    """
    unk_id = word2idx['<unk>']
    input_ids = []
    for tokens in tokenized_texts:
        # Copy so the caller's list is untouched; truncate then pad.
        sent = list(tokens)[:max_len]
        sent += ['<pad>'] * (max_len - len(sent))

        if flip:
            # Two-pad separator followed by the reversed padded sentence.
            sent = sent + ['<pad>'] * 2 + sent[::-1]

        input_ids.append([word2idx.get(token, unk_id) for token in sent])

    return np.array(input_ids)


def load_pretrained_vectors(word2idx, word_embeddings):
    """Create an embedding matrix seeded from pretrained vectors.

    Args:
        word2idx (Dict[str, int]): Vocabulary built from the corpus.
        word_embeddings (Dict[str, Sequence[float]]): Pretrained
            word -> vector mapping; all vectors are assumed to share one
            dimension (taken from an arbitrary entry).

    Returns:
        torch.Tensor: Embedding matrix of shape (len(word2idx), d).
            Rows for words without a pretrained vector keep their random
            initialisation; the '<pad>' row is all zeros.
    """
    print("Loading pretrained vectors...")

    # Embedding dimension from an arbitrary pretrained vector.
    d = len(next(iter(word_embeddings.values())))

    # Random init for words without pretrained vectors; pad stays zero.
    embeddings = np.random.uniform(-0.25, 0.25, (len(word2idx), d))
    embeddings[word2idx['<pad>']] = np.zeros((d,))

    # Overwrite the rows for which a pretrained vector exists.
    for word, vector in tqdm(word_embeddings.items()):
        if word in word2idx:
            embeddings[word2idx[word]] = np.array(vector, dtype=np.float32)

    # torch.autograd.Variable is deprecated and a no-op wrapper on modern
    # torch; returning the tensor directly is equivalent.
    return torch.from_numpy(embeddings)


def get_class_distribution(classes, num_classes=35):
    """Count occurrences of each class id, including zero counts.

    Args:
        classes (Iterable[int]): Class labels to count.
        num_classes (int): Number of tracked classes; labels outside
            ``range(num_classes)`` are silently ignored. Defaults to 35,
            the project's original hard-coded class count.

    Returns:
        Dict[int, int]: class id -> count for ids 0..num_classes-1.
    """
    counter = {cl: 0 for cl in range(num_classes)}
    for cl in classes:
        if cl in counter:
            counter[cl] += 1
    return counter


def to_data_loader(train_inputs, val_inputs, train_labels, val_labels,
                   batch_size):
    """Convert train/validation arrays to tensors and wrap in DataLoaders.

    The training loader uses a WeightedRandomSampler with per-sample
    weights of 1 / class_count, oversampling minority classes.

    Bug fix: the original shuffled the label list with ``randperm``
    before indexing ``class_weights``, so sample i received the weight of
    a random other sample's class and the weighted sampling was broken.
    Weights are now indexed directly by ``train_labels`` so weight i
    corresponds to dataset sample i.

    Args:
        train_inputs, val_inputs: array-like encoded model inputs.
        train_labels, val_labels: array-like integer class labels
            (assumed to lie in range(35) — TODO confirm against callers).
        batch_size (int): batch size for both loaders; incomplete final
            batches are dropped (drop_last=True).

    Returns:
        (train_dataloader, val_dataloader)
    """
    # Convert everything to torch.Tensor.
    train_inputs, val_inputs, train_labels, val_labels = (
        torch.tensor(data) for data in
        (train_inputs, val_inputs, train_labels, val_labels))

    train_data = TensorDataset(train_inputs, train_labels)

    # Inverse-frequency weight per class (classes absent from the batch
    # get weight inf, but they are never indexed), then per sample.
    class_count = torch.bincount(train_labels, minlength=35)
    class_weights = 1. / class_count.float()
    sample_weights = class_weights[train_labels]

    weighted_sampler = WeightedRandomSampler(
        weights=sample_weights,
        num_samples=len(sample_weights),
        replacement=True)

    train_dataloader = DataLoader(
        train_data, sampler=weighted_sampler, batch_size=batch_size,
        drop_last=True)

    # Validation is evaluated in order — no resampling needed.
    val_data = TensorDataset(val_inputs, val_labels)
    val_dataloader = DataLoader(
        val_data, sampler=SequentialSampler(val_data),
        batch_size=batch_size, drop_last=True)

    return train_dataloader, val_dataloader


def flip_text(text_gram):
    """Return a copy of the corpus with every sentence reversed."""
    reversed_lines = []
    for sentence in text_gram:
        reversed_lines.append(sentence[::-1])
    return reversed_lines


def load_data(gram, max_len):
    """Load the corpus, labels and pretrained embeddings for an n-gram size.

    Args:
        gram (int): n-gram size; must be 1, 2 or 3, matching the
            pre-built corpus/embedding JSON files under ``paths['output']``.
        max_len (int): maximum sentence length passed to ``encode``.

    Returns:
        (train_texts, input_ids, test_texts, labels, word2idx, embeddings)

    Raises:
        ValueError: if ``gram`` is not 1, 2 or 3 (the original code fell
            through to a NameError in that case).
    """
    if gram not in (1, 2, 3):
        raise ValueError(f"gram must be 1, 2 or 3, got {gram!r}")

    train_df = pd.read_csv(paths['train_data'])
    # Read for parity with the original flow (fails fast if the file is
    # missing) even though only the training labels are used below.
    test_df = pd.read_csv(paths['test_data'])
    train_df.label = train_df.label.apply(lambda e: label_map[e])
    Y = train_df.label.values

    # One corpus/embedding JSON pair exists per n-gram size.
    text_ = load_json(paths['output'] / f'corpus_{gram}_gram.json')
    word_embeddings_gram = load_json(
        paths['output'] / f'word_embeddings_{gram}_gram_100.json')

    train_texts, test_texts, labels = text_prepare(text_, Y)

    word2idx = word_idx(text_)
    input_ids = encode(train_texts, word2idx, flip=True, max_len=max_len)

    embeddings = load_pretrained_vectors(word2idx, word_embeddings_gram)
    return train_texts, input_ids, test_texts, labels, word2idx, embeddings


# def data_prepare(batch_size, gram, train_df):
#     batch_size = batch_size

#     train_df.label = train_df.label.apply(lambda e: label_map[e])
#     Y = train_df.label.values

#     if gram == 1:
#         text_ = load_json(paths['output'] / 'corpus_1_gram.json')
#         word_embeddings_gram = load_json(
#             paths['output'] / 'word_embeddings_1_gram_200.json')
#     if gram == 2:
#         text_ = load_json(paths['output'] / 'corpus_2_gram.json')
#         word_embeddings_gram = load_json(
#             paths['output'] / 'word_embeddings_2_gram_200.json')
#     if gram == 3:
#         text_ = load_json(paths['output'] / 'corpus_3_gram.json')
#         word_embeddings_gram = load_json(
#             paths['output'] / 'word_embeddings_3_gram_200.json')

#     train_texts, test_texts, labels = text_prepare(text_, Y)

#     word2idx = word_idx(text_)
#     input_ids = encode(train_texts, word2idx, flip=True, max_len=64)

#     print(input_ids.shape)
#     embeddings = load_pretrained_vectors(
#         word2idx, word_embeddings_gram)

#     # Train Test Split
#     train_inputs, val_inputs, train_labels, val_labels = train_test_split(
#         input_ids, labels, test_size=0.1, random_state=42)

#     # Load data to PyTorch DataLoader
#     train_dataloader, val_dataloader = data_loader(
#         train_inputs, val_inputs, train_labels, val_labels, batch_size=batch_size)
#     return train_dataloader, val_dataloader, embeddings, word2idx, train_texts, test_texts
