import numpy as np
import os
from gensim.models import KeyedVectors

PAD_WORD = '<pad>'   # padding token
UNK_WORD = '<unk>'   # out-of-vocabulary token
SOS_WORD = '<s>'     # start-of-sentence token
EOS_WORD = '</s>'    # end-of-sentence token


class Vocabulary(object):
    """Bidirectional word<->index mapping with per-word frequency counts.

    The four special tokens (pad, unk, sos, eos) are added at construction
    time, occupy the lowest indices, and are never removed by `prune`.
    """

    def __init__(self):
        self.idx2word = {}      # index -> word
        self.word2idx = {}      # word -> index
        self.frequencies = {}   # word -> occurrence count

        # Special entries are added first so they take indices 0..3 and
        # are preserved across pruning.
        self.special_words = [PAD_WORD,
                              UNK_WORD,
                              SOS_WORD,
                              EOS_WORD]
        for special_word in self.special_words:
            self.add(special_word)

    def __len__(self):
        return len(self.word2idx)

    @property
    def vocab_size(self):
        return len(self.word2idx)

    def add(self, word, freq=1):
        """Add `word` (or bump its count by `freq` if present); return its index."""
        if word in self.word2idx:
            idx = self.word2idx[word]
        else:
            idx = len(self.idx2word)
            self.idx2word[idx] = word
            self.word2idx[word] = idx
        self.frequencies[word] = self.frequencies.get(word, 0) + freq
        return idx

    def prune(self, size):
        """Return a new Vocabulary with at most `size` entries.

        The special tokens are always kept; the remaining slots go to the
        most frequent regular words.
        """
        new_vocab = Vocabulary()

        # Rank only non-special words. The constructor above already added
        # the specials; excluding them here prevents their frequencies from
        # being double-counted and keeps them from consuming slots in the
        # top-k slice (both bugs in the previous implementation).
        regular = [(word, self.frequencies[word])
                   for word in self.word2idx
                   if word not in self.special_words]
        regular.sort(key=lambda item: item[1], reverse=True)

        # Reserve slots for the specials (was a hard-coded `size - 4`).
        budget = max(size - len(new_vocab.special_words), 0)
        for word, freq in regular[:budget]:
            new_vocab.add(word, freq)

        return new_vocab

    def save_vocab(self, saved_path):
        """Write one `word<TAB>freq` line per entry, in index order."""
        with open(saved_path, 'w', encoding='utf-8') as fw:
            for idx in range(len(self.word2idx)):
                word = self.idx2word[idx]
                freq = self.frequencies[word]
                fw.write("{}\t{}\n".format(word, freq))

    def load_from_file(self, vocab_path):
        """Load `word<TAB>freq` lines produced by `save_vocab`.

        NOTE(review): entries are assigned indices in file order, overwriting
        the constructor's special-token slots — assumes the file was written
        by `save_vocab` so its first lines are the special tokens. Confirm
        before loading files from other sources.
        """
        with open(vocab_path, 'r', encoding='utf-8') as fr:
            for idx, line in enumerate(fr):
                ss = line.rstrip().split("\t")
                self.idx2word[idx] = ss[0]
                self.word2idx[ss[0]] = idx
                self.frequencies[ss[0]] = int(ss[1])

    def convert2idx(self, words):
        """Convert `words` to a list of indices, mapping unknown words
        to the index of UNK_WORD."""
        unk = self.word2idx[UNK_WORD]
        return [self.word2idx.get(word, unk) for word in words]

    def process_embeddings(self, path, saved_dir, saved_prefix, vec_size=300):
        """Build a `[vocab_size, vec_size]` embedding matrix and save it.

        If `path` is given, rows are filled from the word2vec-format file at
        `path` (words missing from it keep zero vectors); otherwise the
        matrix is random. Returns the path passed to `np.save` — note that
        `np.save` appends '.npy', so the file on disk is `<returned>.npy`.

        Raises:
            ValueError: if `path` does not exist or its vector size does
                not match `vec_size`.
        """
        if path is not None:
            if not os.path.exists(path):
                raise ValueError("can not find vectors file")
            vectors = np.zeros([self.vocab_size, vec_size], dtype=np.float32)
            w2v = KeyedVectors.load_word2vec_format(path, binary=False, unicode_errors='ignore')
            # Explicit raise instead of `assert`, which is stripped under -O.
            if vec_size != w2v.vector_size:
                raise ValueError(
                    "vector size mismatch: expected {}, file has {}".format(
                        vec_size, w2v.vector_size))
            for i in range(self.vocab_size):
                word = self.idx2word[i]
                if word not in w2v:
                    continue
                vectors[i] = w2v[word]
        else:
            vectors = np.random.random([self.vocab_size, vec_size])

        emb_path = os.path.join(saved_dir, 'emb.{}.{}'.format(saved_prefix, vec_size))
        np.save(emb_path, vectors)
        return emb_path

def load_vocab(vocab_file):
    """Return a Vocabulary populated from the saved vocab file `vocab_file`."""
    loaded = Vocabulary()
    loaded.load_from_file(vocab_file)
    return loaded


def build_vocab(sentences, saved_dir, emb_path, prefix=None, vocab_size=100000, emb_size=300):
    """Build a pruned Vocabulary from `sentences` and save it plus embeddings.

    Args:
        sentences: iterable of tokenized sentences (lists of words);
            duplicate sentences (same space-joined text) are counted once.
        saved_dir: directory for the vocab and embedding files.
        emb_path: path to a word2vec-format vectors file, or None for
            random embeddings (see `Vocabulary.process_embeddings`).
        prefix: tag embedded in the output filenames.
        vocab_size: maximum size of the pruned vocabulary.
        emb_size: embedding dimensionality.

    Returns:
        (vocab_saved_path, saved_emb_path) tuple of output file paths.
    """
    vocab = Vocabulary()
    seen_sentences = set()
    for sent in sentences:
        key = " ".join(sent)  # compute the join once (was computed twice)
        if key in seen_sentences:
            continue
        seen_sentences.add(key)
        for word in sent:
            vocab.add(word)

    new_vocab = vocab.prune(vocab_size)
    print('prune vocab from {} to {}'.format(vocab.vocab_size, new_vocab.vocab_size))

    # Distinct name: the original rebound the `emb_path` parameter here,
    # shadowing the input vectors path with the output embedding path.
    saved_emb_path = new_vocab.process_embeddings(emb_path, saved_dir, prefix, emb_size)
    vocab_saved_path = os.path.join(saved_dir, 'vocab.{}.txt'.format(prefix))
    new_vocab.save_vocab(vocab_saved_path)
    return vocab_saved_path, saved_emb_path
