"""
@Filename       : text_encoder.py
@Create Time    : 2021/9/26 16:58
@Author         : Rylynn
@Description    : 

"""
import pickle

import torch
import torch as th
import torch.nn as nn

from transformers import BertTokenizer, BertModel


class BertTextEncoder(nn.Module):
    """Encode raw text into fixed-size embeddings with a pretrained BERT.

    Selects a Chinese or English BERT-base checkpoint based on
    ``config['dataset']`` and projects BERT's pooled [CLS] output (768-d)
    down to ``config['embed_dim']`` with a bias-free linear layer.
    """

    def __init__(self, config):
        """
        Args:
            config (dict): must contain
                'dataset'   -- 'dblp' selects bert-base-chinese; anything
                               else selects bert-base-uncased.
                               NOTE(review): dblp -> Chinese BERT looks
                               surprising for an English bibliography
                               dataset — confirm intended mapping.
                'embed_dim' -- output embedding dimension of the encoder.
        """
        super(BertTextEncoder, self).__init__()

        if config['dataset'] == 'dblp':
            self.tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
            self.bert_model = BertModel.from_pretrained("bert-base-chinese")
        else:
            self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
            self.bert_model = BertModel.from_pretrained("bert-base-uncased")

        # BERT-base hidden size is 768; project down to the requested dim.
        self.fc = nn.Linear(768, config['embed_dim'], bias=False)

    def forward(self, text):
        """Tokenize a batch of strings and return projected embeddings.

        Args:
            text (list[str] | str): input sentence(s); padded/truncated to
                at most 100 tokens.

        Returns:
            torch.Tensor of shape (batch, embed_dim).
        """
        tokens = self.tokenizer(text, padding=True, truncation=True,
                                max_length=100, return_tensors='pt')

        # Bug fix: the original hard-coded .cuda(), which crashes on
        # CPU-only machines and ignores .to(device) placement. Move the
        # token tensors to whatever device the model actually lives on.
        device = next(self.bert_model.parameters()).device
        tokens = {k: v.to(device) for k, v in tokens.items()}

        encodings = self.bert_model(**tokens)

        # encodings[1] is BERT's pooler_output (tanh-transformed [CLS]).
        return self.fc(encodings[1])


if __name__ == '__main__':
    # Smoke test: encode two short Chinese sentences.
    # Bug fix: the original passed 'word_embed_size', but
    # BertTextEncoder.__init__ reads config['embed_dim'], so construction
    # raised a KeyError before any forward pass could run.
    encoder = BertTextEncoder({'dataset': 'weibo', 'embed_dim': 64})
    res = encoder(['今天是星期几', '星期天'])

