import random
import logging
from pathlib import Path
import json
import pickle
import numpy as np
import torch

# Default random seed used across the project for reproducibility.
seed_num = 42

# Class labels in "group-index" string form, ordered by integer class id.
_ORDERED_LABELS = [
    "4-7", "3-5", "2-25", "5-35", "2-17", "5-12", "2-6", "6-29", "5-22",
    "6-34", "6-15", "6-32", "2-33", "2-2", "1-10", "8-27", "2-3", "2-14",
    "5-30", "6-31", "6-20", "1-1", "6-13", "2-11", "10-26", "5-24", "1-9",
    "6-8", "8-18", "1-4", "9-23", "6-21", "6-19", "7-16", "6-28",
]

# Label string -> contiguous 0-based integer class id.
label_map = {name: idx for idx, name in enumerate(_ORDERED_LABELS)}

# Integer class id -> label string (inverse of label_map).
reversed_label = {idx: name for name, idx in label_map.items()}


def save_model(model, path):
    """Serialize `model` to `path` with pickle (default protocol)."""
    Path(path).write_bytes(pickle.dumps(model))


def load_model(path):
    """Unpickle and return the object stored at `path`.

    NOTE(review): pickle can execute arbitrary code on load — only use
    this on trusted, locally produced model files.
    """
    return pickle.loads(Path(path).read_bytes())


def load_json(path):
    """Load and return the JSON document stored at `path`.

    Fix: open the file with explicit UTF-8 encoding (the encoding the
    JSON spec mandates) instead of the platform default, which fails on
    non-ASCII content on e.g. Windows/cp1252.
    """
    with open(path, 'r', encoding='utf-8') as f:
        dict_ = json.load(f)
    return dict_


def save_json(path, dt):
    """Serialize `dt` as JSON to `path` and print a confirmation.

    Fix: open the file with explicit UTF-8 encoding so the output does
    not depend on the platform's default locale encoding.
    """
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(dt, f)
    print(path, 'saved.')


def load_npy(path):
    """Load and return the NumPy array stored in the .npy file at `path`."""
    array = np.load(path)
    return array


def save_text(path, line):
    """Write `line` to the text file at `path`, overwriting any content.

    Bug fix: the file was opened in read mode ('r'), so f.write always
    raised io.UnsupportedOperation (and the call failed outright with
    FileNotFoundError for a new file). Open for writing instead, with
    explicit UTF-8 encoding.
    """
    with open(path, 'w', encoding='utf-8') as f:
        f.write(line)


def comb_vector(w2v_m, ftt_m):
    """Concatenate each word's vectors from two embedding models.

    For every word in the word2vec model's vocabulary, stacks its
    word2vec vector and its fastText vector into one list.

    NOTE(review): relies on the pre-4.0 gensim API (`model.wv.vocab`);
    gensim >= 4.0 replaced `.vocab` with `key_to_index` — verify the
    installed version.
    """
    combined = {}
    for word in w2v_m.wv.vocab.keys():
        joined = np.concatenate((w2v_m.wv[word], ftt_m.wv[word]))
        combined[word] = joined.tolist()
    return combined


def set_seed(seed_value=42):
    """Seed every RNG in use (random, NumPy, torch CPU and CUDA) so runs
    are reproducible."""
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed_all,  # no-op when CUDA is unavailable
    )
    for seed_fn in seeders:
        seed_fn(seed_value)


def predict(tokens, model, word2idx, flip=True, max_len=64):
    """Predict the class id for a tokenized input sequence.

    Args:
        tokens: list of string tokens.
        model: PyTorch module mapping a (1, seq_len) LongTensor of token
            ids to (1, num_classes) logits.
        word2idx: dict mapping token -> id; must contain '<pad>' and
            '<unk>' entries.
        flip: if True, append two pad tokens plus the reversed sequence
            (the model presumably expects this doubled layout — confirm
            against training code).
        max_len: length the token list is padded/truncated to before the
            optional flip.

    Returns:
        1-D LongTensor containing the argmax class id.
    """
    model = model.to("cpu")

    # Pad to max_len, then truncate in case the input was longer.
    padded_tokens = tokens + ['<pad>'] * (max_len - len(tokens))
    if len(padded_tokens) > max_len:
        padded_tokens = padded_tokens[:max_len]

    # Optionally append the reversed sequence, separated by two pads.
    if flip:
        padded_tokens += ['<pad>'] * 2 + padded_tokens[::-1]

    # Encode tokens, mapping out-of-vocabulary words to '<unk>'.
    input_id = [word2idx.get(token, word2idx['<unk>'])
                for token in padded_tokens]
    input_id = torch.tensor(input_id).unsqueeze(dim=0)

    # Fix: call the model (not .forward directly) so registered hooks
    # run, and disable autograd — inference needs no gradient tracking.
    with torch.no_grad():
        logits = model(input_id)

    return torch.argmax(logits, dim=1).flatten()


# def initilize_model(pretrained_embedding=None,
#                     freeze_embedding=False,
#                     vocab_size=None,
#                     embed_dim=600,
#                     filter_sizes=[3, 4, 5],
#                     num_filters=[100, 100, 100],
#                     num_classes=35,
#                     dropout=0.5,
#                     learning_rate=0.01):
#     """Instantiate a CNN model and an optimizer."""

#     assert (len(filter_sizes) == len(num_filters)), "filter_sizes and \
#     num_filters need to be of the same length."

#     # Instantiate CNN model
#     cnn_model = TextCNN.TextCNN(pretrained_embedding=pretrained_embedding,
#                                 freeze_embedding=freeze_embedding,
#                                 vocab_size=vocab_size,
#                                 embed_dim=embed_dim,
#                                 filter_sizes=filter_sizes,
#                                 num_filters=num_filters,
#                                 num_classes=35,
#                                 dropout=0.5)

#     # Send model to `device` (GPU/CPU)
#     cnn_model.to(device)

#     # Instantiate Adadelta optimizer
#     optimizer = optim.Adadelta(cnn_model.parameters(),
#                                lr=learning_rate,
#                                rho=0.95)

#     return cnn_model, optimizer

#     # takes in a module and applies the specified weight initialization


def weights_init_uniform_rule(m):
    """Initialize Linear layers with weights drawn from
    U(-1/sqrt(fan_in), 1/sqrt(fan_in)) and zero biases.

    Intended for use with `model.apply(weights_init_uniform_rule)`;
    modules whose class name does not contain 'Linear' are untouched.
    """
    if 'Linear' in m.__class__.__name__:
        fan_in = m.in_features
        bound = 1.0 / np.sqrt(fan_in)
        m.weight.data.uniform_(-bound, bound)
        m.bias.data.fill_(0)
