import torch
import logging
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM

logging.basicConfig(level=logging.INFO)

# Load the pre-trained BERT tokenizer (vocabulary) from a local checkpoint dir.
tokenizer = BertTokenizer.from_pretrained(r'D:\BaiduNetdiskDownload\预训练模型\huggingface\bert-base-uncased')

text1 = "Here is the sentence I want embeddings for."
text2 = "After stealing money from the bank vault, the bank robber was seen fishing on the Mississippi river bank."

# BERT expects special tokens wrapping the sequence: [CLS] ... [SEP].
marked_text = f"[CLS] {text2} [SEP]"
# Two-sentence variant: marked_text = "[CLS] " + text1 + " [SEP]" + text2 + " [SEP]"
print(marked_text)

# Split the marked sentence into WordPiece tokens.
tokenized_text = tokenizer.tokenize(marked_text)
print('token初始化:\n', tokenized_text)

# Peek at a small slice of the vocabulary.
vocab_tokens = list(tokenizer.vocab.keys())
print('查看词汇表部分令牌:\n', vocab_tokens[6000:6010])

# Map each WordPiece token to its index in the BERT vocabulary.
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
print('分词token索引匹配如下:')
for token, token_id in zip(tokenized_text, indexed_tokens):
    print((token, token_id))

# Segment ids distinguish sentence A (all 0s) from sentence B (all 1s).
# With a single-sentence input, every position — including [SEP] — gets 1.
segments_ids = [1 for _ in tokenized_text]
print(segments_ids)

# Wrap both id lists in a batch dimension and convert them to PyTorch tensors.
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])

# Load the pre-trained 'bert-base-uncased' weights from the same local dir.
model = BertModel.from_pretrained(r'D:\BaiduNetdiskDownload\预训练模型\huggingface\bert-base-uncased')
# Switch to evaluation mode (disables dropout) before the forward pass.
model.eval()

# Forward pass: collect the hidden states of every encoder layer.
# "encoded_layers" is indexed in this order:
#   1. layer       (12 encoder layers)
#   2. batch       (1 sentence submitted at a time here)
#   3. token       (22 tokens in this sentence)
#   4. hidden unit (768 features)
# That is 202,752 values to uniquely represent this one sentence!
# Gradients are not needed for inference, so run under no_grad().
with torch.no_grad():
    encoded_layers, _ = model(tokens_tensor, segments_tensors)

# Sanity-check the nesting of encoded_layers at one layer/batch/token.
layer_i, batch_i, token_i = 0, 0, 0
n_layers = len(encoded_layers)
print("Number of layers:", n_layers)
n_batches = len(encoded_layers[layer_i])
print("Number of batches:", n_batches)
n_tokens = len(encoded_layers[layer_i][batch_i])
print("Number of tokens:", n_tokens)
n_hidden = len(encoded_layers[layer_i][batch_i][token_i])
print("Number of hidden units:", n_hidden)

# Regroup the hidden states by token: for each token, gather its vector from
# every encoder layer, yielding [n_tokens][n_layers] tensors of 768 features.
# A nested comprehension replaces the original double append loop (same order,
# same values, idiomatic construction).
token_embeddings = [
    [layer[batch_i][token_i] for layer in encoded_layers]
    for token_i in range(len(tokenized_text))
]
# Sanity check the dimensions:
print("Number of tokens in sequence:", len(token_embeddings))
print("Number of layers per token:", len(token_embeddings[0]))

# To get a single vector per token we must pool over layers. The right
# strategy (mean/max/concat) and which layers to use (last four, all, last
# one, ...) depend on the application. Here the last four layers are pooled
# two ways:
#   - concatenation -> [number_of_tokens] vectors of size 4*768 = 3072
#   - summation     -> [number_of_tokens] vectors of size 768
# Concatenation keeps the deepest layer first, as in the original.
concatenated_last_4_layers = [torch.cat((layer[-1], layer[-2], layer[-3], layer[-4]), 0) for layer in token_embeddings]  # [number_of_tokens, 3072]
# Stack only the last four layers before summing: the previous
# torch.stack(layer)[-4:] stacked all 12 layers and then discarded 8,
# doing redundant work for an identical result.
summed_last_4_layers = [torch.sum(torch.stack(layer[-4:]), 0) for layer in token_embeddings]  # [number_of_tokens, 768]

# Print each token with its position so the hard-coded indices used below
# (the three occurrences of 'bank' at 6, 10 and 19) can be verified by eye.
for i, x in enumerate(tokenized_text):
    print(i, x)

print("First fifteen values of bank vector in 'bank robber':\n", summed_last_4_layers[10][:15])
print("First fifteen values of bank vector in 'bank vault':\n", summed_last_4_layers[6][:15])
print("First fifteen values of bank vector in 'river bank':\n", summed_last_4_layers[19][:15])

from sklearn.metrics.pairwise import cosine_similarity

# cosine_similarity expects 2-D inputs, so reshape each 768-vector to (1, 768).
vec_bank_robber = summed_last_4_layers[10].reshape(1, -1)
vec_bank_vault = summed_last_4_layers[6].reshape(1, -1)
vec_river_bank = summed_last_4_layers[19].reshape(1, -1)

# Same surface word, different senses: 'bank' (robber) vs. 'bank' (river).
different_bank = cosine_similarity(vec_bank_robber, vec_river_bank)[0][0]
# Same sense: 'bank' (robber) vs. 'bank' (vault).
same_bank = cosine_similarity(vec_bank_robber, vec_bank_vault)[0][0]
print("Similarity of 'bank' as in 'bank robber' to 'bank' as in 'bank vault':\n",  same_bank)
print("Similarity of 'bank' as in 'bank robber' to 'bank' as in 'river bank':\n",  different_bank)
