#!/usr/bin/env python 
# -*- coding:utf-8 -*-
'''
@File    :   bert_embedding.py
@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2022/4/22 0022 15:03   st      1.0         None
'''
import time
#
# from bert.word_embedding import get_word_embedding
from process import similar_process as sp
from similar import sim_cos_utils

from bert_serving.client import BertClient
bc = BertClient()

# t1 = time.time()
# res = bc.encode(['上海'])
# print(res)
# print('-----bert-sim:', time.time() - t1)


# def get_bert_sim_avge(words_list):
#     words_tuple = sp.create_words_tuple(words_list)
#     tupe_list = tuple_list(words_tuple)
#     words_voc_list = bc.encode(tupe_list)
#     words_voc_tuple = create_vocs_tuple(words_voc_list, len(tupe_list))
#     words_tuple_len = len(words_voc_tuple)
#     sim = 0
#     for voc1, voc2 in words_voc_tuple:
#         sim += sim_cos_utils.similarity_cosine(voc1, voc2)
#     return sim/words_tuple_len


def tuple_list(words_tuple):
    """
    Flatten a sequence of (word1, word2) pairs into a single flat list.

    :param words_tuple: iterable of 2-item pairs, e.g. [('a', 'b'), ('c', 'd')]
    :return: flat list preserving pair order, e.g. ['a', 'b', 'c', 'd']
    """
    # Flattening comprehension replaces the manual double-append loop.
    return [word for pair in words_tuple for word in pair]


def create_vocs_tuple(voc_list, voc_list_len=0):
    """
    Re-pair a flat list of vectors into (vec1, vec2) tuples.

    Inverse of `tuple_list`: consecutive elements of `voc_list` are grouped
    two at a time for pairwise similarity computation.

    :param voc_list: flat sequence of vectors, length >= voc_list_len
    :param voc_list_len: number of leading elements to pair up (must be even);
                         defaults to 0, which yields an empty list — callers
                         pass len(voc_list) explicitly.
    :return: list of (voc_list[i], voc_list[i+1]) tuples
    """
    # Pair adjacent elements; range step of 2 walks pair starts.
    return [(voc_list[i], voc_list[i + 1]) for i in range(0, voc_list_len, 2)]


def temp_word_dict(data_list):
    """
    Build a word -> embedding-vector cache for every word in `data_list`.

    Collects all unique words from each (words, _) entry, encodes them in a
    single BertClient batch call, and maps each word to its vector.

    :param data_list: iterable of (words, _) pairs; `words` is whatever
                      sp.create_words_tuple accepts (presumably a word list
                      — TODO confirm against caller).
    :return: dict mapping word -> embedding vector from bc.encode
    """
    unique_words = set()
    for words, _ in data_list:
        words_tuple = sp.create_words_tuple(words)
        # tuple_list flattens the pairs; the set deduplicates across entries.
        unique_words.update(tuple_list(words_tuple))
    unique_words = list(unique_words)
    # One batched encode call instead of one per word keeps this fast.
    words_voc_list = bc.encode(unique_words)
    # zip pairs each word with its vector (encode preserves input order).
    return dict(zip(unique_words, words_voc_list))


def get_bert_sim(word_1, word_2, voc_dict=None):
    """
    Compute cosine similarity between the BERT embeddings of two words.

    :param word_1: first word/phrase to embed
    :param word_2: second word/phrase to embed
    :param voc_dict: optional precomputed word -> vector cache (see
                     temp_word_dict). When None or empty, vectors are fetched
                     from the bert-serving server instead.
    :return: cosine similarity score from sim_cos_utils.similarity_cosine
    """
    # NOTE: the original used a mutable default ({}); None is the safe
    # sentinel. `not voc_dict` is True for both None and {}, so behavior
    # for every existing caller is unchanged.
    if not voc_dict:
        word_voc_1 = bc.encode([word_1])
        word_voc_2 = bc.encode([word_2])
    else:
        # Cache hit path; .get returns None for missing words — presumably
        # callers guarantee both words are in the cache (TODO confirm).
        word_voc_1 = voc_dict.get(word_1)
        word_voc_2 = voc_dict.get(word_2)
    sim = sim_cos_utils.similarity_cosine(word_voc_1, word_voc_2)
    return sim


if __name__ == '__main__':
    # Smoke test: requires a running bert-serving server for the module-level
    # BertClient; result is discarded, so this only checks the call succeeds.
    get_bert_sim('上海', '北京')

