# coding=utf-8
# Copyright (C) xxx team - All Rights Reserved
#
# @Version:   Python 3.9.4
# @Software:  PyCharm
# @FileName:  cosine_similarity.py
# @CTime:     2021/4/20 16:01   
# @Author:    xxx
# @Email:     xxx
# @UTime:     2021/4/20 16:01
#
# @Description:
#     xxx
#
import torch
import logging
import numpy as np
from typing import List, Dict
from transformers import AutoModel, AutoTokenizer
from sklearn.metrics.pairwise import cosine_similarity

logger = logging.getLogger(__name__)


def main(model_name, text):
    """Compute pairwise cosine similarities of sentence vectors taken
    from every hidden layer of a pretrained BERT-style model.

    Args:
        model_name: Hugging Face model id or local checkpoint path.
        text: list of strings, encoded together as one padded batch.

    Returns:
        A list with one dict per hidden state (index 0 is the embedding
        layer), each containing:
            'cls':  (len(text), len(text)) cosine-similarity matrix of
                    the [CLS] position vectors, rounded to 4 decimals.
            'mean': the same matrix for mask-aware mean-pooled vectors.
    """
    bert = AutoModel.from_pretrained(model_name)
    bert.eval()
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    # Calling the tokenizer directly is the modern API;
    # batch_encode_plus is deprecated.
    enc = tokenizer(text, padding=True, return_tensors='pt')

    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        outputs = bert(enc['input_ids'],
                       attention_mask=enc['attention_mask'],
                       output_hidden_states=True,
                       return_dict=True)

    # (batch, seq, 1) float mask so [PAD] positions drop out of the mean.
    mask = enc['attention_mask'].unsqueeze(-1).to(torch.float32)

    outs = []
    for layer in outputs['hidden_states']:
        cls_out = layer[:, 0, :].numpy()
        # Bug fix: the original torch.mean(layer, dim=-2) averaged over
        # ALL positions, including [PAD] tokens added by padding=True,
        # biasing vectors of shorter sentences. Weight by the mask instead.
        mean_out = ((layer * mask).sum(dim=1) / mask.sum(dim=1)).numpy()

        outs.append({
            'cls': np.around(cosine_similarity(cls_out), 4),
            'mean': np.around(cosine_similarity(mean_out), 4),
        })
    return outs


if __name__ == '__main__':
    model_name = 'hfl/chinese-macbert-base'
    text = [
        '狗',
        '猫',
        '狗不理包子',
        '幸运草',
    ]

    layer_sims = main(model_name, text)
    # Skip index 0: hidden_states[0] is the embedding layer, not a
    # transformer layer, so reporting starts at layer 1.
    for layer_no, sims in enumerate(layer_sims[1:], start=1):
        print(f'第{layer_no}层：')
        print(f"cls cosine_similarity:\n {sims['cls']}\n\n"
              f"mean cosine_similarity:\n {sims['mean']}")


# cls cosine_similarity:
#  [[1.     0.9514 0.8625 0.8639]
#  [0.9514 1.     0.8213 0.8352]
#  [0.8625 0.8213 1.     0.8828]
#  [0.8639 0.8352 0.8828 1.    ]]
#
# mean cosine_similarity:
#  [[1.     0.9792 0.7738 0.8728]
#  [0.9792 1.     0.7474 0.862 ]
#  [0.7738 0.7474 1.     0.8295]
#  [0.8728 0.862  0.8295 1.    ]]

# 有个结论，bert预训练出来的向量都很相似