from transformers import BertTokenizer, BertModel
import torch
import torch.nn.functional as F

# bert_dir =  "/home/Dyf/code/models/pretrain_models/bert"
# Local path to a pretrained Chinese BERT checkpoint (bert-base-chinese).
bert_dir = "/home/Dyf/code/models/pretrain_models/bertcn/bert-base-chinese"

# Load the tokenizer and encoder once at module import time.
bert_tokenizer = BertTokenizer.from_pretrained(bert_dir)
bert_model = BertModel.from_pretrained(bert_dir)

# Vocabulary lookup tables: token -> id, and the inverse id -> token.
word2id_bert = bert_tokenizer.get_vocab()
id2word_bert = {word2id_bert[key]: key for key in word2id_bert}

# Demo: encode a Chinese paragraph and inspect the [CLS] embedding.
text = """群体到达河畔，沐浴的胖子以及陈越与钟鸣的闲聊排于前景。胖子递给陈越一个自造水具，但钟鸣因见阴影窜出，陈越紧随其后。钟鸣未追获阴影，幸受陈越扶助以避免脚伤。回到河边，胖子和夏荷消失。老练的陈越发现胖子留下的标记，与钟鸣寻得一暂居地。二人犹疑在外，被突然出现的两人用枪指向并带入营地。陈越和钟鸣见到了胖子和教授沈临川，领导着一队装备火器的地质考察队。但陈越对沈临川的操作存在疑虑，因为此地偏僻，他认为沈临川一直秘密跟踪他们。"""
inputs = bert_tokenizer(text, return_tensors='pt', padding=True, truncation=True)

with torch.no_grad():  # inference only -- no autograd graph needed
    outputs = bert_model(**inputs)
    last_hidden_states = outputs.last_hidden_state

cls_token_vector = last_hidden_states[0, 0, :]  # first sequence, hidden state of the first ([CLS]) token
print(cls_token_vector)
# Vocabulary lookup examples kept for reference:
# print(id2word_bert[3333])
# print(word2id_bert['朴'])
# print(word2id_bert['素'])
# print(word2id_bert['的'])
# print(word2id_bert['不'])
# print(word2id_bert['简'])
# print(word2id_bert['单'])
# Two sample token sequences ("朴素的" vs "不简单") compared further below.
s1 = [['朴', '素', '的']]
s2 = [['不', '简', '单']]


def get_sentence_vector(word):
    """Return the BERT token embeddings of *word*.

    Args:
        word: A string encoded with the module-level ``bert_tokenizer``.

    Returns:
        Tensor of shape ``(num_tokens, hidden_size)`` holding the
        last-layer hidden states of the word's real tokens only.
    """
    encoded_input = bert_tokenizer(word, return_tensors='pt')
    with torch.no_grad():  # inference only; skip building the autograd graph
        outputs = bert_model(**encoded_input)
    # Bug fix: the original slice ``[:len(bert_tokenizer.tokenize(word))]``
    # started at index 0, which is the [CLS] special token, so it returned
    # [CLS] plus all-but-the-last real token. ``1:-1`` selects exactly the
    # real tokens sitting between [CLS] and [SEP].
    return outputs.last_hidden_state[0, 1:-1, :]


def get_sentence_vectors(texts):
    """Average BERT token embeddings over a sequence of texts.

    Each element of *texts* is encoded independently, the last-layer
    hidden states of its real tokens (excluding the [CLS]/[SEP] special
    tokens) are collected, and the mean over all collected tokens is
    returned as a single pooled vector.

    Args:
        texts: Iterable of strings (the callers below pass lists of
            single Chinese characters).

    Returns:
        Tensor of shape ``(1, hidden_size)``.
    """
    word_embeddings_list = []
    for text in texts:
        encoded_input = bert_tokenizer(text, return_tensors='pt')
        with torch.no_grad():  # inference only; skip the autograd graph
            outputs = bert_model(**encoded_input)
        # Bug fix: the original slice ``[:len(text)]`` started at index 0,
        # the [CLS] token -- for single-character inputs it therefore
        # averaged [CLS] vectors instead of the characters' own
        # embeddings. ``1:-1`` selects exactly the real tokens between
        # [CLS] and [SEP]. (A leftover debug ``print(len(text))`` was
        # also removed.)
        word_embeddings_list.append(outputs.last_hidden_state[0, 1:-1, :])
    return torch.cat(word_embeddings_list).mean(dim=0).unsqueeze(dim=0)


# Unwrap the single token lists and compare their pooled embeddings.
first_tokens = s1[0]
second_tokens = s2[0]
# Cosine similarity between the two (1, hidden_size) sentence vectors.
cosine_sims = F.cosine_similarity(
    get_sentence_vectors(first_tokens),
    get_sentence_vectors(second_tokens),
    dim=1,
)
print(cosine_sims)
