import os.path
from collections import defaultdict

import jieba
import numpy as np
import pandas as pd
import torch
from gensim.models import KeyedVectors


class Word2Vec:
    """
    Load pre-trained word2vec vectors (gensim KeyedVectors format) and expose
    vocabulary / embedding lookup helpers.
    """

    def __init__(self, word2vec_file_pth="/home/public/projects/emotion_dan/dataset/sgns.wiki.char.bin", embed_size=300):
        # Path to a model previously saved with KeyedVectors.save().
        self.word2vec_file_pth = word2vec_file_pth
        # Dimensionality of the pre-trained vectors; must match the file.
        self.embed_size = embed_size
        self.vocab, self.weight = self.get_vocab_weight()

    @staticmethod
    def convert_word2vec_model2binary(in_file=None, out_file=None):
        """
        Convert a text-format word2vec file to gensim's binary format.

        :param in_file: path to the text-format vector file; must exist.
        :param out_file: output path; defaults to ``in_file + ".bin"``.
        :raises ValueError: if ``in_file`` is None or does not exist.
        """
        # BUG FIX: the original condition was `os.path.isfile(in_file)`
        # (missing the `not`), which raised for existing files and let
        # missing files fall through to the loader.
        if in_file is None or not os.path.isfile(in_file):
            raise ValueError("文件不存在")
        if out_file is None:
            out_file = in_file + ".bin"
            print("default out file name is ： in file add .bin", out_file)
        wv_from_text = KeyedVectors.load_word2vec_format(in_file, binary=False)
        # NOTE(review): init_sims() is deprecated in gensim 4.x; consider
        # fill_norms() / get_normed_vectors() when upgrading — confirm version.
        wv_from_text.init_sims(replace=True)
        wv_from_text.save(out_file)
        print("convert finish ... ")

    def get_model_w2v(self):
        """
        Load the binary Word2Vec model with gensim.

        Useful attributes on the returned model:
        ``.vectors`` — embedding matrix; ``.index_to_key`` — vocabulary list.

        :return: KeyedVectors model with an extra all-zero "<unk>" row appended.
        """
        model = KeyedVectors.load(self.word2vec_file_pth, mmap='r')
        # Use the configured dimensionality instead of a hard-coded 300 so
        # models with other vector sizes work too.
        model.add_vectors(
            keys=["<unk>"],
            weights=[np.zeros(self.embed_size)]
        )
        return model

    def get_vocab_weight(self):
        """
        Build the vocabulary and the embedding weight matrix.

        Example::
            vocab, weight = get_vocab_weight()
            ids = vocab.convert_tokens_to_ids(['，', '。', '<unk>'])

        :return: (vocab, weight) — a Vocab instance and a FloatTensor of
                 shape (vocab_size, embed_size).
        """
        model_w2v = self.get_model_w2v()
        vocab = Vocab(model_w2v.index_to_key)
        weight = torch.zeros(len(vocab), self.embed_size)
        # Copy all rows in one vectorized assignment instead of a Python-level
        # per-row loop; np.array() makes a writable copy of the (possibly
        # mmap'd, read-only) vector matrix.
        n_pretrained = len(model_w2v.vectors)
        weight[:n_pretrained] = torch.tensor(np.array(model_w2v.vectors), dtype=torch.float32)
        return vocab, weight

    def get_ids(self, txt):
        """
        Tokenize ``txt`` with jieba and map each token to its vocabulary id.

        Example: "上海的小吃真不错"
                 -> ['上海', '的', '小吃', '真不错']
                 -> [346, 1, 14103, 352221]

        :param txt: raw text string.
        :return: list of int token ids (unknown tokens map to the <unk> id).
        """
        words = list(jieba.cut(txt))
        return self.vocab.convert_tokens_to_ids(words)

    def embedding(self, txts):
        """
        Sum the embeddings of all tokens in ``txts``.

        :param txts: raw text string.
        :return: numpy array of shape (embed_size,).
        """
        torch_embedding = torch.nn.Embedding.from_pretrained(self.weight)
        # BUG FIX: get_ids() returns a single list — the original
        # `_, indexs = self.get_ids(txts)` always raised at runtime.
        ids = self.get_ids(txts)
        emd = torch_embedding(torch.tensor(ids, dtype=torch.long))
        return torch.sum(emd, 0).numpy()


class Vocab:
    """
    Bidirectional token <-> id mapping with an "<unk>" fallback.

    :ivar tokens: when tokens is None, the vocab is empty —
                  call ``Vocab.build()`` to construct one from text.
    """

    def __init__(self, tokens=None):
        # id -> token (list index is the id).
        self.idx_to_token = list()
        # token -> id.
        self.token_to_idx = dict()

        if tokens is not None:
            # Guarantee an "<unk>" entry so unknown lookups have a target.
            if "<unk>" not in tokens:
                tokens = tokens + ["<unk>"]
            for token in tokens:
                self.idx_to_token.append(token)
                self.token_to_idx[token] = len(self.idx_to_token) - 1
            self.unk = self.token_to_idx['<unk>']

    @classmethod
    def build(cls, text, min_freq=1, reserved_tokens=None):
        """
        Build a Vocab from tokenized sentences.

        :param text: iterable of sentences, each an iterable of tokens.
        :param min_freq: keep only tokens occurring at least this often.
        :param reserved_tokens: extra tokens inserted right after "<unk>".
        :return: a new Vocab instance.
        """
        # BUG FIX: `defaultdict` was used here without ever being imported,
        # so build() raised NameError; the import is now at the top of the file.
        token_freqs = defaultdict(int)
        for sentence in text:
            for token in sentence:
                token_freqs[token] += 1
        uniq_tokens = ["<unk>"] + (reserved_tokens if reserved_tokens else [])
        uniq_tokens += [token for token, freq in token_freqs.items() if freq >= min_freq and token != "<unk>"]
        return cls(uniq_tokens)

    def __len__(self):
        """Number of tokens in the vocabulary."""
        return len(self.idx_to_token)

    def __getitem__(self, token):
        """Return the id for ``token``, or the <unk> id if absent."""
        return self.token_to_idx.get(token, self.unk)

    def convert_tokens_to_ids(self, tokens):
        """Map a list of tokens to their ids (<unk> for unknown tokens)."""
        return [self[token] for token in tokens]

    def convert_ids_to_tokens(self, indices):
        """Map a list of ids back to their tokens."""
        return [self.idx_to_token[index] for index in indices]


if __name__ == '__main__':
    # Demo: load the pre-trained model, embed a short Chinese sentence,
    # and inspect the resulting vector.
    w2v_model = Word2Vec()
    sentence_vec = w2v_model.embedding("，我有点想你了")
    print(sentence_vec)
    print(type(sentence_vec))
