from transformers import BertModel, BertTokenizer
import torch

# Local Hugging Face hub cache directory.
# NOTE: this is a raw string, so single backslashes are literal; the original
# r"...\\..." embedded doubled backslashes in every path separator.
cache_path = r"C:\Users\jliud\.cache\huggingface\hub"

def test_encode():
    """Encode one sentence with BERT and inspect the last-layer embeddings.

    Demonstrates fully manual preprocessing: tokenize, add [CLS]/[SEP],
    pad to a fixed length, build the attention mask, then run the model.
    """
    model = BertModel.from_pretrained('bert-base-uncased', cache_dir=cache_path)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', cache_dir=cache_path)
    # Inference only: switch off dropout so the outputs are deterministic.
    model.eval()
    sentence = 'I love Paris'
    tokens = tokenizer.tokenize(sentence)
    # Prepend the [CLS] marker and append the [SEP] marker.
    tokens = ['[CLS]'] + tokens + ['[SEP]']
    # Pad to a fixed length of 7:
    # ['[CLS]', 'i', 'love', 'paris', '[SEP]', '[PAD]', '[PAD]']
    tokens = tokens + ['[PAD]'] + ['[PAD]']
    # Attention mask: 1 for real tokens, 0 for padding -> [1, 1, 1, 1, 1, 0, 0]
    attention_mask = [1 if i != '[PAD]' else 0 for i in tokens]
    # Map every token to its vocabulary id -> [101, 1045, 2293, 3000, 102, 0, 0]
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    # Add a batch dimension and convert both sequences to tensors.
    token_ids = torch.tensor(token_ids).unsqueeze(0)    # tensor([[ 101, 1045, 2293, 3000,  102,    0,    0]])
    attention_mask = torch.tensor(attention_mask).unsqueeze(0)  # tensor([[1, 1, 1, 1, 1, 0, 0]])
    # Run the model without building an autograd graph (we never backprop here).
    with torch.no_grad():
        hidden_rep, cls_head = model(token_ids, attention_mask=attention_mask, return_dict=False)
    # hidden_rep: torch.Size([1, 7, 768])  # [batch_size, sequence_length, hidden_size]
    # cls_head: the pooled [CLS] embedding, torch.Size([1, 768])  # [batch_size, hidden_size]


def test_full_encode():
    """Encode one sentence and retrieve the embeddings from ALL encoder layers.

    Unlike test_encode, the model is built with output_hidden_states=True so
    the forward pass also returns every intermediate layer's hidden states.
    """
    model = BertModel.from_pretrained('bert-base-uncased', cache_dir=cache_path, output_hidden_states=True)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', cache_dir=cache_path)
    # Inference only: switch off dropout so the outputs are deterministic.
    model.eval()
    # Preprocess: tokenize, then add the [CLS] and [SEP] markers.
    sentence = 'I love Paris'
    tokens = tokenizer.tokenize(sentence)
    tokens = ['[CLS]'] + tokens + ['[SEP]']
    # Pad with [PAD] to a fixed length and build the matching attention mask.
    tokens = tokens + ['[PAD]'] + ['[PAD]']
    attention_mask = [1 if i != '[PAD]' else 0 for i in tokens]
    # Map tokens to their vocabulary ids.
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    # Add a batch dimension and convert both sequences to tensors.
    token_ids = torch.tensor(token_ids).unsqueeze(0)
    attention_mask = torch.tensor(attention_mask).unsqueeze(0)
    # Forward pass (no autograd graph needed for inference).
    # last_hidden_state: all token embeddings from the FINAL encoder layer,
    #   torch.Size([1, 7, 768])  # [batch_size, sequence_length, hidden_size]
    # pooler_output: the final-layer [CLS] embedding passed through a linear
    #   layer + tanh (BertPooler).
    # hidden_states: token embeddings from every encoder layer.
    with torch.no_grad():
        last_hidden_state, pooler_output, hidden_states = model(token_ids, attention_mask=attention_mask, return_dict=False)
    print(last_hidden_state)

def main():
    """Script entry point: run the all-layers encoding demo."""
    test_full_encode()

# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()