import string
from collections import Counter

import numpy as np
import scipy.spatial.distance
import torch
import torch.optim as opt
import torch.utils.data as tud
from zhon.hanzi import punctuation

from wordembedding import WordEmbeddingDataset, WordEmbeddingModel

# Paths to the raw training corpora on disk, keyed by language code.
# Only 'zh' is actually trained on below; 'en' is read but unused.
data = {
    "en": './data/en.txt',
    "zh": './data/zh.txt',
}


def find_nearest(ew, word, k, w2i=None, i2w=None, max_vocab=None):
    """
    Find the k nearest words to ``word`` by cosine distance in embedding space.

    :param ew: embedding weight matrix, shape (vocab_size, embedding_dim)
    :param word: target word; must be a key of the word-to-index mapping
    :param k: number of neighbours to return
    :param w2i: word -> row-index mapping; defaults to the module-level
        ``word2index`` built in the ``__main__`` section (original behavior)
    :param i2w: row-index -> word sequence; defaults to the module-level
        ``index2word``
    :param max_vocab: upper bound on the neighbour slice; defaults to the
        module-level ``MAX_VOCAB_SIZE``
    :return: list of the k nearest words, excluding ``word`` itself
    """
    # Backward-compatible defaults: fall back to the globals the original
    # implementation read directly.
    if w2i is None:
        w2i = word2index
    if i2w is None:
        i2w = index2word
    if max_vocab is None:
        max_vocab = MAX_VOCAB_SIZE
    index = w2i[word]
    # Single vectorized cdist call instead of a Python-level loop that invoked
    # scipy.spatial.distance.cosine once per vocabulary row.
    cos_dis = scipy.spatial.distance.cdist(ew[index:index + 1], ew,
                                           metric='cosine')[0]
    # Position 0 of the sorted order is the word itself (distance 0): skip it.
    sort_indexes = cos_dis.argsort()[1:min(k + 1, max_vocab)]
    return [i2w[i] for i in sort_indexes]


if __name__ == "__main__":

    # Hyper-parameters.
    epochs = 2
    MAX_VOCAB_SIZE = 10000   # vocabulary cap; rarer words collapse into <UNK>
    EMBEDDING_SIZE = 100
    batch_size = 32
    # Bug fix: the original defined an unused `lr = 0.2` and then built Adam
    # with a hard-coded 0.001. There is now a single constant holding the
    # learning rate that is actually used (value kept at the effective 0.001).
    lr = 0.001
    K = 10                   # how many nearest neighbours to display

    # Load the raw corpora.
    with open(data['zh'], 'r', encoding='utf-8') as data_file:
        zh_text = data_file.read()

    with open(data['en'], 'r', encoding='utf-8') as data_file:
        en_text = data_file.read()

    text = zh_text
    # Strip English and Chinese punctuation in one C-level pass
    # (equivalent to the original chain of per-character str.replace calls).
    strip_punct = str.maketrans('', '', string.punctuation + punctuation)
    text = text.translate(strip_punct).lower().split()

    # Keep the (MAX_VOCAB_SIZE - 1) most frequent words; every remaining
    # occurrence is accounted to the <UNK> bucket.
    vocabulary_dict = dict(Counter(text).most_common(MAX_VOCAB_SIZE - 1))
    vocabulary_dict['<UNK>'] = len(text) - np.sum(list(vocabulary_dict.values()))

    index2word = list(vocabulary_dict.keys())
    word2index = {word: i for i, word in enumerate(index2word)}
    word_counts = np.array(list(vocabulary_dict.values()))
    # Negative-sampling distribution: unigram frequency raised to the 3/4
    # power, as recommended by the word2vec paper (Mikolov et al., 2013).
    word_frequencies = (word_counts / np.sum(word_counts)) ** (3. / 4.)

    # Dataset yields (center, positive-context, negative-sample) index triples.
    ds = WordEmbeddingDataset(text=text,
                              word2index=word2index,
                              index2word=index2word,
                              word_counts=word_counts,
                              word_frequencies=word_frequencies)
    dl = tud.DataLoader(ds, batch_size, shuffle=True)

    model = WordEmbeddingModel(MAX_VOCAB_SIZE, EMBEDDING_SIZE)
    optimizer = opt.Adam(model.parameters(), lr=lr)

    # Reuse a previously trained checkpoint if one exists; otherwise train.
    try:
        model.load_state_dict(torch.load('embedding-{}.pth'.format(EMBEDDING_SIZE)))
    except OSError:  # IOError is an alias of OSError in Python 3
        print('find model file failed, start to train model')
        for epoch in range(epochs):
            for i, (input_labels, pos_labels, neg_labels) in enumerate(dl):
                input_labels = input_labels.long()
                pos_labels = pos_labels.long()
                neg_labels = neg_labels.long()

                optimizer.zero_grad()
                # Model returns a per-example negative-sampling loss.
                loss = model(input_labels, pos_labels, neg_labels).mean()
                loss.backward()
                optimizer.step()

                if i % 100 == 0:
                    print('epoch', epoch, 'iteration', i, loss.item())

    # The input-side embedding matrix is what we use as the word vectors.
    embedding_weights = model.input_embedding()
    torch.save(model.state_dict(), 'embedding-{}.pth'.format(EMBEDDING_SIZE))
    print('{}: {}'.format('\'我们\'', find_nearest(embedding_weights, '我们', K)))
