#coding:utf-8
import os
import sys
import json
import jieba
import jieba.posseg as pseg
import numpy as np
import requests
from sklearn.metrics.pairwise import cosine_similarity, paired_distances
import time

base_path = os.path.abspath(os.path.dirname(os.getcwd()))
sys.path.append(base_path)
from utils.embedding.embedding_client import SentenceEmbedding # new version, group_id


def word2vec(text, timeout=5):
    '''Fetch Tencent word embeddings for `text` from the internal HTTP service.

    Args:
        text: the input string to embed.
        timeout: seconds to wait for the HTTP request (keeps a dead service
            from hanging the script forever; default 5).

    Returns:
        A (sentence_embedding, tokens, word_embedding) tuple. On any
        request/parse failure the error is printed and three empty lists
        are returned so callers can detect the failure.
    '''
    headers = {'Content-Type': 'application/json', "connection": "close"}
    try:
        response = requests.post(url='http://10.13.40.138:8060/embedding/',
                                 headers=headers,
                                 data=json.dumps({"text": text}),
                                 timeout=timeout)
        ret = response.json()['ret']
        return ret['sentence_embedding'], ret['tokens'], ret['word_embedding']
    except Exception as e:
        # Best-effort service call: log and fall through to the empty sentinel.
        print(e)

    return [], [], []

def __jaccard_distance(list_1, list_2):
    '''Jaccard similarity of two token collections: |A & B| / |A | B|.

    Despite the name, a higher value means MORE overlap. Returns 0.0 when
    either input is empty (avoids a zero-division on the union).
    '''
    if not list_1 or not list_2:
        return 0.0

    set_a, set_b = set(list_1), set(list_2)
    return float(len(set_a & set_b)) / float(len(set_a | set_b))

def __jaccard_with_weights(title_split, query_weights, cand_title_split, cand_weights):
    '''Weighted Jaccard similarity between two token lists.

    Each token carries a weight; for tokens present in both lists the
    candidate-side weight wins (same override order as the original dict
    merge). Score = sum(weights of intersection) / sum(weights of union);
    0.0 when there is no overlap.

    Args:
        title_split / query_weights: query tokens and their parallel weights.
        cand_title_split / cand_weights: candidate tokens and their weights.
    '''
    # Candidate weights overwrite query weights on shared tokens.
    token_weights = dict(zip(title_split, query_weights))
    token_weights.update(zip(cand_title_split, cand_weights))

    tokens_q = set(title_split)
    tokens_t = set(cand_title_split)

    inter = tokens_q & tokens_t
    if not inter:
        return 0.0
    union = tokens_q | tokens_t

    return sum(token_weights[tk] for tk in inter) / sum(token_weights[tk] for tk in union)

# Load the custom user dictionary so jieba recognizes domain-specific terms.
#jieba.load_userdict(os.path.join(base_path,'data/custom_dict.txt'))
jieba.load_userdict(os.path.join(base_path,'data/new_custom.txt'))

def __split(title):
    '''POS-tag segment `title` with jieba and keep the informative tokens.

    Keeps tokens that are longer than one character, or whose POS flag
    contains 'n' (noun-like) or 'm' (numeral). If that filter drops
    everything, falls back to keeping all non-punctuation tokens
    (flag without 'x').

    Returns:
        (dict mapping token -> POS flag, []) — the second element is a
        placeholder kept for interface compatibility with callers that
        unpack two values.
    '''
    # Tokenize once and reuse; pseg.cut returns a one-shot generator and the
    # original re-segmented the title when the fallback branch was taken.
    tagged = list(pseg.cut(title))
    tokens_postag = {word: flag for word, flag in tagged
                     if len(word) > 1 or 'n' in flag or 'm' in flag}
    if not tokens_postag:
        tokens_postag = {word: flag for word, flag in tagged if 'x' not in flag}
    return tokens_postag, []

# Client for the sentence-embedding service (new group_id version).
embedding_client = SentenceEmbedding()

# CLI: argv[1] is the text to segment; optional argv[2] enables the
# similarity comparison between the two texts.
if len(sys.argv) < 2:
    print("need text to split")
    exit()

_A = sys.argv[1]
_B = sys.argv[2] if len(sys.argv) > 2 else ''

print('split text: ', _A)
if _B:
    print('split text: ', _B)

tokens_postag, tokens_weights = __split(_A)
print(tokens_postag)

# Without a second text there is nothing to compare — stop here.
if not _B:
    exit()
# Embed both texts, timing each call to report per-call latency in ms.
st = time.time()
_vector_a = embedding_client.inference(_A)
st1 = time.time()
_vector_b = embedding_client.inference(_B)
st2 = time.time()

print("embedding cost: {}\t{}".format(1000*(st1-st), 1000*(st2-st1)))
tokens_postag_b, tokens_weights_b = __split(_B)
print(tokens_postag_b)

# Tencent embedding service; word2vec returns empty lists on failure.
_tc_vector_a, _, _ = word2vec(_A)
_tc_vector_b, _, _ = word2vec(_B)

# NOTE(review): despite the "distance" labels these are similarities —
# higher means closer.
print('jaccard distance: ', __jaccard_distance(tokens_postag.keys(), tokens_postag_b.keys()))
print('cosine  distance: ', np.dot(_vector_a, _vector_b))

# Guard the failure path: cosine_similarity raises ValueError on empty
# vectors, which the original did not handle.
if len(_tc_vector_a) and len(_tc_vector_b):
    print('tx cosine  distance: ', cosine_similarity([_tc_vector_a], [_tc_vector_b])[0][0])
else:
    print('tx cosine  distance: unavailable (embedding request failed)')