import os
# Configure the Hugging Face environment BEFORE importing transformers:
# huggingface_hub reads these variables when it is first imported.
# HF_ENDPOINT: route Hub downloads through a mirror endpoint.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
# HF_HOME: local cache directory for downloaded models/tokenizers.
os.environ["HF_HOME"] = "F:/datas/llms"
import torch  # NOTE(review): re-imported below at the demo section; harmless but redundant

# NOTE(review): wildcard import is discouraged (PEP 8). Only the
# commented-out experiments below relied on it; the active code imports
# BertTokenizer/BertModel explicitly.
from transformers import *
# nlp_sentence_classif=pipeline("sentiment-analysis")
# print(nlp_sentence_classif("我不喜欢这本书。"))
#
# tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#
# sequence = "Using a Transformer network is simple"
# sequence_ids = tokenizer.encode(sequence)
#
# print(sequence_ids)

# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#
# sequence = "Using a Transformer network is simple"
# tokens = tokenizer.tokenize(sequence)
#
# print(tokens)

# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#
# decoded_string = tokenizer.decode([7993, 170, 11303, 1200, 2443, 1110, 3014])
# print(decoded_string)
#
# decoded_string = tokenizer.decode([101, 7993, 170, 13809, 23763, 2443, 1110, 3014, 102])
# print(decoded_string)
#

from transformers import BertTokenizer, BertModel
import torch
from sklearn.metrics.pairwise import cosine_similarity

# Downloads on first use; cached afterwards (under HF_HOME if set).
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')
# from_pretrained already returns the model in eval mode; being explicit
# documents that this script is inference-only (dropout disabled).
model.eval()

# torch.no_grad(): inference only — skip autograd bookkeeping so no
# computation graph is built (saves memory and time).
with torch.no_grad():
    inputs1 = tokenizer('企业服务', return_tensors='pt')
    outputs1 = model(**inputs1)
    inputs2 = tokenizer('企业民生服务', return_tensors='pt')
    outputs2 = model(**inputs2)

# pooler_output is the [CLS] token's hidden state passed through a trained
# dense+tanh layer, shape (1, hidden_size) — used here as a sentence vector.
pooler_output1 = outputs1.pooler_output.reshape(1, -1).tolist()[0]
pooler_output2 = outputs2.pooler_output.reshape(1, -1).tolist()[0]

# Caveat: raw (un-fine-tuned) BERT [CLS] vectors give uniformly high cosine
# similarities; fine-tuning is needed for a discriminative similarity metric.
print(cosine_similarity([pooler_output1], [pooler_output2]))
