# -*- coding: utf-8 -*-
# @Time    : 2021/11/27 14:43
# @Author  : zhangyi
# @FileName: bert_embedding.py
# @Software: vscode

'''
Obtain BERT outputs with the ``transformers`` library, then combine the outputs
of selected hidden layers into a text representation to feed into downstream
fine-tuning. (Work in progress.)
'''

import pickle
from transformers import (
    BertTokenizer,
    BertConfig,
    BertModel
)
import torch 
import os
# Restrict visible GPUs BEFORE any CUDA interaction: CUDA_VISIBLE_DEVICES is
# read when the CUDA context is initialized, so it must be exported first.
os.environ["CUDA_VISIBLE_DEVICES"]='2'
# Fall back to CPU when no GPU is available so the script still runs.
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
n_gpu=torch.cuda.device_count()  # number of GPUs visible after masking


if __name__=="__main__":
    # Demo: encode two sentences with a Chinese BERT, inspect the hidden
    # states, and build sentence representations from selected layers.
    tokenizer=BertTokenizer.from_pretrained('chinese_L-12_H-768_A-12')

    # Tokenize two (identical) sentences, padding/truncating to length 30.
    encodings = [
        tokenizer('今天天气不错', return_tensors="pt", padding='max_length',
                  max_length=30, truncation=True)
        for _ in range(2)
    ]

    # Batch the per-sentence [1, 30] tensors along dim 0 -> [2, 30].
    input_ids = torch.cat([e['input_ids'] for e in encodings], dim=0)
    token_type_ids = torch.cat([e['token_type_ids'] for e in encodings], dim=0)
    attention_mask = torch.cat([e['attention_mask'] for e in encodings], dim=0)

    print(input_ids.shape)  # torch.Size([2, 30])

    bert=BertModel.from_pretrained('chinese_L-12_H-768_A-12').to(device)
    out=bert(input_ids=input_ids.to(device),
            token_type_ids=token_type_ids.to(device),
            attention_mask=attention_mask.to(device),
            output_hidden_states=True,
            return_dict=True)

    print(type(out.last_hidden_state),type(out.pooler_output),type(out.hidden_states))
    print(out.last_hidden_state.shape)
    print(len(out.hidden_states))  # 13: embedding output + 12 transformer layers
    print(out.hidden_states[0].shape)  # embedding-layer output
    print(out.hidden_states[1][0].shape)  # indices 1-12: transformer layer outputs
    # out.hidden_states[i]: [batch_size, seq_len, hidden_dim]

    # Sentence embedding for the first sentence: mean over the last layer's
    # token vectors.
    token_vecs=out.hidden_states[12][0]  # [30, 768]
    sentence_embedding = torch.mean(token_vecs, dim=0)  # torch.Size([768])
    print(sentence_embedding.shape)

    print(out.hidden_states[1].shape,out.hidden_states[1][:,0].shape)
    # [CLS] vectors from the first four transformer layers, each [2, 768].
    cls_per_layer = [out.hidden_states[i][:, 0] for i in range(1, 5)]
    # BUG FIX: the original used torch.cat(..., dim=0), producing [8, 768]
    # whose reduction over dim 0 merged both sentences into one [768] vector,
    # and the .sum(0) result was immediately overwritten by a duplicate
    # .mean(0) line. The shape comment ([4, 2, 768]) shows the intent was
    # torch.stack: reduce over the LAYER axis so each sentence keeps its own
    # 768-dim representation.
    top4_layer = torch.stack(cls_per_layer, dim=0).sum(0)  # [2, 768]
    print(top4_layer.shape)