import numpy as np
import itertools
import os
import sys
from sklearn.metrics.pairwise import cosine_similarity

try:
    from gensim.models import KeyedVectors
except ImportError:
    from gensim.models import Word2Vec as KeyedVectors

import six
from gensim.scripts.glove2word2vec import glove2word2vec


def txt2bin(filename):
    """Convert a word2vec-format text model to gensim's binary format.

    The converted model is written next to *filename* with the ``.txt``
    extension replaced by ``.bin``.
    """
    m = KeyedVectors.load_word2vec_format(filename)
    # NOTE(review): setting sample_int on one vocab entry looks like a
    # workaround for a gensim save/load quirk -- confirm against the
    # gensim version in use before removing.
    m.vocab[next(iter(m.vocab))].sample_int = 1
    # Replace only the file extension; the previous str.replace('txt', 'bin')
    # would also rewrite a 'txt' occurring earlier in the path.
    root, _ = os.path.splitext(filename)
    m.save(root + '.bin', separately=None)


def generate(path):
    """Prepare the binary GloVe model under directory *path*.

    Expects ``glove.6B.300d.model.txt`` (word2vec text format) to exist in
    *path*; converts it to gensim binary format via :func:`txt2bin`.
    """
    glove_vector_file = os.path.join(path, 'glove.6B.300d.txt')
    output_model_file = os.path.join(path, 'glove.6B.300d.model.txt')
    # One-time GloVe -> word2vec text conversion, apparently already done:
    # glove2word2vec(glove_vector_file, output_model_file)
    txt2bin(output_model_file)


class Embedding(object):
    """Word -> vector lookup over a saved gensim KeyedVectors model.

    Words map to row indices of the embedding matrix; any out-of-vocabulary
    word maps to one extra virtual row holding the mean of all vectors.
    """

    def __init__(self, path):
        # mmap='r' keeps the (large) matrix on disk, read-only.
        self.m = KeyedVectors.load(os.path.join(path, 'glove.6B.300d.model.bin'), mmap='r')
        # Mean of all embeddings stands in for unknown words.
        self.unk = self._vectors().mean(axis=0)

    def _vectors(self):
        """Raw embedding matrix across gensim versions
        (``.vectors`` in newer gensim, ``.syn0`` in older)."""
        try:
            return self.m.vectors
        except AttributeError:
            return self.m.syn0

    @property
    def w2v(self):
        """Embedding matrix with the <unk> mean vector appended as last row."""
        # Previously accessed self.m.syn0 directly, which raises
        # AttributeError on gensim versions that only expose .vectors.
        return np.concatenate((self._vectors(), self.unk[None, :]), axis=0)

    def __getitem__(self, key):
        """Row index for *key*; index of the appended <unk> row if OOV."""
        try:
            return self.m.vocab[key].index
        except KeyError:
            return len(self._vectors())

    def vec(self, key):
        """Embedding vector for *key*, or the mean (<unk>) vector if OOV."""
        try:
            return self._vectors()[self.m.vocab[key].index]
        except KeyError:
            return self.unk

def cos_sim(a, b):
    """Cosine similarity between 1-D vectors *a* and *b*."""
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

def _pair_cos(a, b):
    """Cosine similarity of two 1-D vectors (same formula as module-level cos_sim)."""
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))


def _extreme_embedding(embs):
    """Per-dimension extrema vector between the column max and column min.

    Selection rule kept verbatim from the original implementation
    (roughly: the larger-magnitude value, with an exact-zero min
    resolving to the min).
    """
    maxemb = np.max(embs, axis=0)
    minemb = np.min(embs, axis=0)
    return np.array(list(map(
        lambda x, y: x if ((x > y or x < -y) and y > 0) or ((x < y or x > -y) and y < 0) else y,
        maxemb, minemb)))


def _sentence_stats(sentences, embedding_array):
    """Embed each token sequence.

    Returns three aligned lists: per-sentence word-embedding matrices,
    L2-normalized mean embeddings, and extrema embeddings.
    """
    embs_list, avg_list, extreme_list = [], [], []
    for sentence in sentences:
        embs = np.array([embedding_array.vec(word) for word in sentence])
        total = np.sum(embs, axis=0)
        avg_emb = total / np.linalg.norm(total)
        # A zero-norm sentence embedding would poison every mean downstream.
        assert not np.any(np.isnan(avg_emb))
        embs_list.append(embs)
        avg_list.append(avg_emb)
        extreme_list.append(_extreme_embedding(embs))
    return embs_list, avg_list, extreme_list


def _cosine_matrix(a, b):
    """Row-wise cosine similarity matrix.

    Numpy equivalent of sklearn.metrics.pairwise.cosine_similarity;
    like sklearn's normalize, zero rows are left as zeros.
    """
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    a_norms = np.linalg.norm(a, axis=1, keepdims=True)
    b_norms = np.linalg.norm(b, axis=1, keepdims=True)
    a_norms[a_norms == 0.0] = 1.0
    b_norms[b_norms == 0.0] = 1.0
    return np.dot(a / a_norms, (b / b_norms).T)


def embedding_metrics(hypothesis, references, embedding_array):
    """Embedding-based NLG metrics over paired hypothesis/reference sentences.

    Args:
        hypothesis: list of token lists, one per hypothesis.
        references: list of token lists, aligned 1:1 with `hypothesis`.
        embedding_array: object exposing ``vec(word) -> 1-D np.ndarray``.

    Returns:
        (embedding_average, vector_extrema, greedy_matching), each the mean
        cosine score over all hypothesis/reference pairs.
    """
    emb_hyps, avg_emb_hyps, extreme_emb_hyps = _sentence_stats(hypothesis, embedding_array)
    emb_refs, avg_emb_refs, extreme_emb_refs = _sentence_stats(references, embedding_array)

    # Embedding average: cosine between the normalized mean embeddings.
    avg_cos_similarity = np.array(
        [_pair_cos(hyp, ref) for hyp, ref in zip(avg_emb_hyps, avg_emb_refs)]).mean()

    # Vector extrema: cosine between the per-dimension extrema vectors.
    extreme_cos_similarity = np.array(
        [_pair_cos(hyp, ref) for hyp, ref in zip(extreme_emb_hyps, extreme_emb_refs)]).mean()

    # Greedy matching: each token is matched to its most similar token on
    # the other side; the two directions are averaged.
    scores = []
    for emb_ref, emb_hyp in zip(emb_refs, emb_hyps):
        simi_matrix = _cosine_matrix(emb_ref, emb_hyp)
        dir1 = simi_matrix.max(axis=0).mean()
        dir2 = simi_matrix.max(axis=1).mean()
        scores.append((dir1 + dir2) / 2)
    greedy_scores = np.mean(scores)

    return avg_cos_similarity, extreme_cos_similarity, greedy_scores


if __name__ == "__main__":
    # CLI entry point: argv[1] is the directory holding the GloVe model
    # files; converts the word2vec-format text model to gensim binary.
    generate(sys.argv[1])
