import numpy as np
from tensorflow.python import pywrap_tensorflow
import torch


class GetBertVector(object):
    """Extract the BERT word-embedding matrix from a checkpoint and write it
    in word2vec text format: a header line ``"<vocab_size> <dim>"`` followed
    by one ``"<token> <v1> <v2> ..."`` line per vocabulary entry.
    """

    def __init__(self, vocab_file, model_file, model_type, output_file):
        """
        :param vocab_file: full path to the vocabulary file (one token per line)
        :param model_file: full path to the model checkpoint
        :param model_type: model type, either "tensorflow" or "pytorch"
        :param output_file: full path of the output file to write
        """
        self.vocab_file = vocab_file
        self.model_file = model_file
        self.model_type = model_type
        self.output_file = output_file

    def get_vector(self):
        """Load the embedding matrix and write it next to the vocabulary.

        :raises ValueError: if ``model_type`` is neither "tensorflow" nor
            "pytorch" (the original silently fell through with ``None`` and
            crashed later with a confusing error).
        """
        # FIX: the original compared strings with `is`, which tests object
        # identity and only worked by accident of CPython string interning.
        if self.model_type == "tensorflow":
            embed_param = self.get_tensorflow_param()
        elif self.model_type == "pytorch":
            embed_param = self.get_pytorch_param()
        else:
            raise ValueError(
                "model_type must be 'tensorflow' or 'pytorch', got %r"
                % (self.model_type,)
            )
        # Normalize (TF ndarray / torch tensor) to a plain nested Python list.
        embed_param = np.array(embed_param).tolist()

        with open(self.vocab_file, 'r', encoding='utf-8') as f:
            vocab = f.readlines()

        # NOTE(review): assumes len(vocab) == number of embedding rows;
        # if they differ, the header and body will disagree — confirm upstream.
        with open(self.output_file, 'w', encoding='utf-8') as f:
            # word2vec text-format header: "<rows> <dim>"
            f.write(str(len(embed_param)) + " " + str(len(embed_param[0])) + "\n")
            for index, token in enumerate(vocab):
                f.write(token.rstrip("\n"))
                f.write(" ")
                # Join values directly instead of stripping brackets/commas
                # off the repr of a list — same output, clearer intent.
                f.write(" ".join(str(v) for v in embed_param[index]))
                f.write("\n")

    def get_tensorflow_param(self):
        """Return the word-embedding tensor from a TensorFlow BERT checkpoint."""
        reader = pywrap_tensorflow.NewCheckpointReader(self.model_file)
        return reader.get_tensor('bert/embeddings/word_embeddings')

    def get_pytorch_param(self):
        """Return the word-embedding weight from a PyTorch BERT state dict."""
        # map_location="cpu" so checkpoints saved on GPU load on CPU-only hosts.
        model = torch.load(self.model_file, map_location="cpu")
        return model["bert.embeddings.word_embeddings.weight"]
