# coding:utf-8
import re
import json
import time
import redis
import numpy as np
from collections import Counter
from utils.tool import FpTool
from utils.features_extract import FeatureExtract
from utils.regulation import olympic_regulation,is_olympic,is_russia, russia_event_day, is_mu5735,long_news_spam
from utils.opensearch_client import OpensearchClient
from utils.search_client import SearchClient
from utils.setting import OPENSEARCH_FIELDS
import datetime
import random
import func_timeout

class SimilaritySearch(FpTool):
    """Dedup recall module: retrieves similar-document candidates and writes docs back to the index."""

    def __init__(self, index_name="group_id_online", logger="", is_debug=False):
        """
        index_name -- opensearch index to query/insert (replaced by the test index in debug mode)
        logger     -- logger passed through to FpTool
        is_debug   -- when True, use the test index and do not write logs to kafka
        """
        super(SimilaritySearch, self).__init__(logger, write_log_to_kafka=not is_debug)
        self.is_debug = is_debug
        if is_debug:
            # Route all reads/writes to the test index while debugging.
            self.debug_index = 'test_group_id'
            index_name = self.debug_index
        self.search_client = OpensearchClient(index=index_name)
        self.search_client_new = SearchClient(index=index_name)
        # Candidate counts for the text and vector recall channels.
        self.text_sim_docs_num = 50
        self.vec_sim_docs_num = 50
        # Fields that must be retained on recalled candidate documents.
        self.fields = {
            "title", "title_split", "title_entity", "keywords",
            "classes", "group_id", "group_id_v1", "ctime", "dataid",
        }

    def __call__(self, data):
        """Make the instance directly callable as an alias for search()."""
        return self.search(data)
    
    @FpTool.cost
    def search(self, data):
        """Recall similar-document candidates for *data*.

        Runs three recall channels (title text search, keywords text search,
        vector search), then filters and scores the merged candidates.
        Returns a list of scored candidate dicts, or [] when the query has no
        usable signal.
        """
        def only_search_root(data):
            '''Cluster-root strategy: decide whether to recall cluster roots only.
            Recalling roots only fixes cluster drift, but reduces recall of near-duplicate cases.'''
            query_title = data.get('title','')
            keywords    = data.get('keywords', '')
            classes     = data.get('classes', '')
            if is_olympic(query_title): return True
            if is_russia(query_title + keywords):       return True
            if is_mu5735(query_title + keywords):       return True
            if classes in ["新冠肺炎","体育", "财经", "天气", "时政", "数码", "情感", "军事"]:  return  True
            if data.get('content_type') != -1:  return True
            if data.get('isHotNews'):   return True
            return False

        title_split = data.get('title_split','')
        query_title = data.get('title','')
        keywords    = data.get('keywords', '')
        word_filter = data.get('word_filter', '')
        title_entity= data.get('title_entity', '')
        query_embedding  = data.get('embedding')
        query_embedding_v1  = [float(i) for i in data.get('embedding_v1','').split(',') if i]
        search_time_from = data.get("time_from")
        if not title_split and not keywords and not query_embedding:
            self.logger("search query empty!  {}".format(json.dumps(data, ensure_ascii=False)))
            return []
        ## Winter-olympics filtering signals
        sports = data.get('sports')
        athlete = data.get('athlete')
        ## Whether to recall cluster roots only
        is_cluster_root, is_russia_war = only_search_root(data), False
        day_filter_words = []  # FIX: define up-front so the candidate-loop use is always in scope
        ## Cluster-root strategy (olympics / Russia-Ukraine)
        if is_cluster_root:
            self.logger("hit olympic/russia! q title:{} dataid:{} set is_cluster_root=True".format(query_title, data.get('dataid')))
            day_filter_words = russia_event_day(query_title) ## Russia-war daily report: filter by explicit day number if one appears
            if day_filter_words:
                is_russia_war = True
                self.logger("hit russia war! q title:{} dataid:{} filter_words:{}".format(query_title, data.get('dataid'), day_filter_words))
        try:
            qctime = int(data['ctime'])
        except Exception:
            qctime = int(time.time())

        qctime_date = datetime.datetime.fromtimestamp(qctime)
        similarity_cands = []
        # Covid cluster-root tweak: the title channel does not restrict to roots for covid news
        covid_flag = bool(data.get('classes', '') == "新冠肺炎")
        is_title_cluster_root = False if covid_flag else is_cluster_root
        # FIX: pre-define the DSLs so the retry/except paths below cannot raise NameError
        # when build_dsl itself fails.
        title_query_dsl, keywords_query_dsl = None, None
        try:
            #  title search
            title_query_dsl = self.search_client_new.build_dsl(field="title_split", value=title_split, size=self.text_sim_docs_num, search_type="match", time_from=search_time_from, is_cluster_root=is_title_cluster_root)
            #  keywords search
            keywords_query_dsl = self.search_client_new.build_dsl(field="keywords", value=keywords, size=20, search_type="match", time_from=search_time_from, is_cluster_root=is_cluster_root)
            # candidate search, retried on timeout
            _retry = 3
            _sucess = False
            while _retry > 0 and not _sucess:
                try:
                    _st1 = time.time()
                    similarity_cands = self.search_client_new.msearch([title_query_dsl, keywords_query_dsl])
                    self.logger("@@@@@@@ msearch costs {:.4f}ms".format(1000*(time.time() - _st1)))
                    _sucess = True
                except func_timeout.exceptions.FunctionTimedOut:
                    self.logger("msearch tiemout! retry:{}\tdataid:{}\tdsl:{}".format(_retry, data.get('dataid'),[title_query_dsl, keywords_query_dsl]))
                    time.sleep(0.005)
                _retry -= 1
        except Exception as e:
            # FIX: the message has two placeholders; pass e into format() instead of as a
            # stray second positional argument to logger (the old call raised IndexError).
            self.logger("cands search error! {}\tmsg:{}".format(json.dumps(data, ensure_ascii=False), e))

        # vector retrieval, retried on timeout
        # NOTE(review): if msearch failed above, vcands lands at index 0 and is later
        # labeled 'tsearch' by the rsource list — confirm whether that is intended.
        _retry = 3
        _sucess = False
        while _retry > 0 and not _sucess:
            try:
                _st1 = time.time()
                vcands = self.search_client_new.vector_search(vector=query_embedding, topk=self.vec_sim_docs_num)
                self.logger("@@@@@@@ vector search costs {:.4f}ms".format(1000*(time.time() - _st1)))
                similarity_cands.append(vcands)
                _sucess = True
            except func_timeout.exceptions.FunctionTimedOut:
                self.logger("vector search tiemout! retry:{}\tdataid:{}".format(_retry, data.get('dataid')))
                time.sleep(0.005)
            _retry -= 1

        # last-resort retry with a plain title search when every channel came back empty
        if not similarity_cands:
            self.logger("similarity cands empty! retry! dataid:{}".format(data.get('dataid')))
            try:
                similarity_cands = [self.search_client_new.search(body=title_query_dsl)['hits']['hits']]
            except Exception as e:
                self.logger("search retry error! dsl:{} msg:{}".format(title_query_dsl, e))
            if not similarity_cands:    self.logger("similarity cands empty! retry fail! dataid:{}".format(data.get('dataid')))

        cands_docs, rsource = [], ["tsearch", "ksearch", "vsearch"]
        for i, cands in enumerate(similarity_cands):
            for cand in cands:
                title = cand['_source']['title']
                if len(title) < 4:      continue   ## drop ultra-short titles (noise)
                tmp = self.__NorData(cand['_source'], self.fields)
                _cand_title_entity = cand['_source'].get('title_entity','').split()
                _cand_keywords = cand['_source'].get('keywords','').split()
                ## Russia-war daily-report filter: candidate must share a day word with the query
                if is_russia_war and not (set(russia_event_day(title)) & set(day_filter_words)):
                    self.logger("hit russia war filter! q title:{} cand title:{} cand dataid:{} filter:{}".format(query_title, title, cand['_source'].get('dataid'), day_filter_words))
                    continue
                ## Entity-word filter: drop candidates whose entities/keywords share nothing with word_filter
                if word_filter and any(_cand_title_entity) and not (set(word_filter) & set(_cand_title_entity)) and not (set(word_filter) & set(_cand_keywords)):
                    if covid_flag:
                        # covid article: if the query carries a region and the candidate carries a
                        # different region marker ('ns:'), filter it out
                        if any('ns:' in i for i in _cand_keywords):
                            continue
                    else:
                        continue
                ## Winter-olympics filter: a candidate carrying a sport/athlete signal must share
                ## an athlete name or a sport event with the current article
                if sports or athlete:
                    cand_sports, cand_athlete = olympic_regulation(title)
                    # filter when both carry sport events but none match, or both carry athletes but none match
                    if (sports and cand_sports and not (set(sports) & set(cand_sports))) or (athlete and cand_athlete and not (set(athlete) & set(cand_athlete))):
                        self.logger("hit olympic filter! q title:{} cand title:{} cand dataid:{}".format(query_title, title, cand['_source'].get('dataid')))
                        continue
                ## Sport-match weighting
                sportMatch = data.get('sport_match')
                sport_ratio = 1.0
                if sportMatch:
                    score = sportMatch.get('score',[])
                    team = sportMatch.get('team',[])
                    uscore = len(set(score) & set(_cand_title_entity))
                    uteam = len(set(team) & set(_cand_title_entity))
                    if uscore != 0 and uteam != 0:
                        ## same score AND same team: strong boost
                        sport_ratio = 2.0
                    elif uscore != 0:
                        ## the score is the strongest signal
                        sport_ratio = 1.1
                    elif uteam != 0:
                        ## more than one shared team: small boost
                        if uteam > 1:   sport_ratio = 1.1
                    else:
                        ## neither score nor team matches: demote
                        sport_ratio = 0.8
                    ## candidate carries a score unlike the query's: demote
                    ## (avoids mixing in-game and final-result news)
                    ## FIX: raw string for the regex pattern
                    if re.search(r"\d+比\d+", cand['_source'].get('title_entity','')) and uscore == 0:
                        sport_ratio = 0.7
                try:
                    cand_ctime = int(cand['_source']['ctime'])
                except Exception:
                    cand_ctime = int(time.time())
                cand_ctime_date = datetime.datetime.fromtimestamp(cand_ctime)
                diff_days = abs((qctime_date - cand_ctime_date ).days)
                __time_score = self.function_score(diff_days)  # time-decay coefficient
                tmp['__time_score'] = __time_score
                tmp['sport_ratio'] = sport_ratio
                cand_embedding = [float(i) for i in cand['_source'].get('embedding_v1','').split(',') if i]
                tmp["distance"] = np.dot(query_embedding, cand['_source']['embedding']) * __time_score * sport_ratio
                try:
                    if len(query_embedding_v1) != 128 or len(cand_embedding) != 128:
                        tmp["distance_v1"] = 0
                    else:
                        tmp["distance_v1"] = self.cosine(query_embedding_v1, cand_embedding) * __time_score * sport_ratio
                except Exception:
                    tmp["distance_v1"] = 0
                if tmp["distance"] < 0.3:   continue   ## soft cutoff
                try:
                    tmp["jaccard"] = self.jaccard_distance(title_split.split(), cand['_source']['title_split'].split()) * __time_score * sport_ratio
                except Exception:
                    tmp["jaccard"] = 0.0
                try:
                    tmp['keywords_score'] = self.jaccard_distance(keywords.split(), cand['_source'].get('keywords','').split()) * __time_score * sport_ratio
                except Exception:
                    tmp['keywords_score'] = 0.0
                if rsource[i] == 'vsearch' and is_cluster_root and (tmp['distance'] < 0.8 and tmp["jaccard"] < 0.25):        continue  ## cluster-root strategy (still admit near-identical non-root docs)
                if covid_flag and rsource[i] == 'tsearch' and is_cluster_root and not cand['_source'].get('root',False) and tmp["jaccard"] < 0.65: continue ## covid cluster-root strategy (still admit near-identical non-root docs)
                tmp['score'] = tmp['distance'] + tmp['jaccard'] + tmp['keywords_score']
                tmp['score_v1'] = tmp['distance_v1'] + tmp['jaccard'] + tmp['keywords_score']
                if tmp['score'] < 0.7:  continue  ## soft cutoff
                tmp['rsource'] = rsource[i]
                _lcsubstr, _length = self.find_lcsubstr(query_title, title)
                if _lcsubstr:   tmp['lcsubstr'] = _lcsubstr
                # NOTE(review): this jaccard boost is applied AFTER 'score' was computed above,
                # so it influences later sorting but not 'score' itself — confirm intended.
                if _length >= 4 and self.is_chinese(_lcsubstr):    tmp["jaccard"] *=(1+(_length/(min(len(query_title), len(title)))))
                self.logger("search recall data:{}".format(json.dumps(tmp, ensure_ascii=False)))
                cands_docs.append(tmp)

        return cands_docs

    @FpTool.cost
    def insert(self, data):
        """Normalize *data* down to OPENSEARCH_FIELDS and persist it to opensearch.

        Debug mode writes to the test index; otherwise recent documents are
        double-written (old + new client) into a per-month index, and stale
        documents are skipped. Returns True on success, False on any error.
        """
        try:
            doc = self.__NorData(data, OPENSEARCH_FIELDS)
            ret = "default"
            if self.is_debug:
                ret = self.search_client.insert(doc, id=doc['dataid'], index_name=self.debug_index)
            else:
                try:
                    ctime = int(data.get('ctime', time.time()))
                except:
                    ctime = int(time.time())
                if abs(time.time() - ctime) < 3600*24*30:
                    # monthly index, e.g. group_id_2022_03
                    lt = time.localtime(ctime)
                    index_name = "group_id_{}_{:0>2d}".format(lt.tm_year, lt.tm_mon)
                    ret = self.search_client.insert(doc, id=doc['dataid'], index_name=index_name)
                    # double-write through the new client as well
                    ret = self.search_client_new.insert(doc, id=doc['dataid'], index_name=index_name)
                else:
                    # documents with a ctime far from now are not indexed
                    self.logger("old news skip insert opensearch! data:{}".format(data))
            self.logger("insert data to opensearch:{} data:{}".format(ret, doc))
        except Exception as e:
            self.logger("insert to db error! msg:{}".format(e))
            return False
        return True

    def __NorData(self, data, fields):
        """Project *data* onto *fields*, silently skipping keys that are absent."""
        projected = {}
        for key in fields:
            if key in data:
                projected[key] = data[key]
        return projected

    def cosine(self, orivec, simvec):
        """Cosine similarity between two vectors: dot(a, b) / (|a| * |b|)."""
        dot_product = np.dot(orivec, simvec)
        norm_product = np.linalg.norm(orivec) * np.linalg.norm(simvec)
        return dot_product / norm_product


class NewsFingerPrint(FpTool):
    '''
    Computes the news fingerprint (group_id) from
    title/keywords/embedding/title_split/title_entity/classes.
    '''
    def __init__(self, logger=''):
        super(NewsFingerPrint, self).__init__(logger, write_log_to_kafka=True)
        self.features_extract = FeatureExtract()              ## basic feature extraction
        self.search_client = SimilaritySearch(logger=logger)  ## candidate recall

    @FpTool.cost
    def group_id(self, data):
        """Compute the dedup cluster id (group_id) for one article.

        Returns the id of the cluster the article joins, or its own iid when it
        starts a new cluster or is skipped (special topics, spam, empty/overlong
        titles). Side effects: writes the normalized doc into opensearch and
        caches the title -> group_id mapping.
        """
        iid = data.get('iid','')
        data['title'] = data.get('title','').replace(" ","") # strip spaces from the title
        # FIX: url may be missing or None; this test ran before the protective try
        # below and raised TypeError on 'in None'
        if 'zt_d' in (data.get('url') or ''):   return iid   # special-topic pages are deduped separately
        ## empty titles / spam content are not indexed
        if not data.get('title',''):    return iid
        if '黑猫投诉' in data.get('title', ''): return iid
        if data.get('dataid') and data.get('dataid').startswith("tt:") and long_news_spam(data.get('title')):  return iid # spammy long articles skip group_id
        if len(data.get('title', '')) > 100:    return iid ## overly long titles are not computed
        try:
            ## group_id cache
            ret = self.hit_cache(title=data['title'])
            if ret and 'group_id' in ret:
                self.logger("hit cache! title:{}\tret:{}".format(data['title'], ret))
                return ret['group_id']
            try:
                ## special handling for covid case-count articles
                spical_fp = self.features_extract.covid_news_fp(data.get('title',''), data.get('ctime'))
                if spical_fp:
                    self.logger("covid news:{} group_id:{}".format(json.dumps(data, ensure_ascii=False),spical_fp))
                    return spical_fp
            except Exception as e:
                self.logger("ERROR - covid news:{} mgs:{}".format(json.dumps(data, ensure_ascii=False),e))
            st = time.time()
            NorData = self.features_extract(data)
            self.logger("features costs: {:.4f}".format(1000*(time.time()-st)))
            self.logger("input data:{}".format(json.dumps(NorData, ensure_ascii=False)))
            try:
                cands_docs = self.search_client(NorData)    # coarse recall
            except Exception as e:
                self.logger("search opensearch error! dataid:{}  msg:{}".format(data.get('dataid'), e))
                cands_docs = []
            NorData['group_id'] = NorData['iid']     # initial value
            NorData['origin']   = NorData['dataid']  # initial value
            NorData['reason']   = 'src'              # initial value
            # content fingerprint (keyword-similarity diagnostics)
            self.content_fp(NorData, cands_docs)
            cands_docs = sorted(cands_docs,key=lambda x:x['jaccard'], reverse=True) # prefer literally similar candidates
            if cands_docs:
                # v1 cluster id (distance_v1 pipeline)
                if cands_docs[0]['distance_v1']>=NorData['cosine_threshold'] and cands_docs[0]['jaccard']>=NorData['jaccard_threshold']:
                    NorData['group_id_v1'] = cands_docs[0].get('group_id_v1', cands_docs[0].get('group_id'))
                else:
                    new_group_id, origin, reason = self.select_cluster_v1(NorData, cands_docs)
                    if new_group_id:
                        NorData['group_id_v1'] = new_group_id
                ## the first candidate passes both thresholds: take it directly
                if cands_docs[0]['distance']>=NorData['cosine_threshold'] and cands_docs[0]['jaccard']>=NorData['jaccard_threshold']:
                    self.logger("@@@@@@@ top1 group_id:{} cand doc:{}".format(cands_docs[0]['group_id'], cands_docs[0]))
                    NorData['group_id'] = cands_docs[0]['group_id']
                    NorData['origin'] = cands_docs[0]['dataid']
                    NorData['reason'] = 'top1'
                ## otherwise re-rank the candidate set and pick a cluster
                else:
                    new_group_id, origin, reason = self.select_cluster(NorData, cands_docs)
                    if new_group_id:
                        NorData['group_id'] = new_group_id
                        NorData['origin']   = origin
                        NorData['reason']   = reason

            NorData['root'] = 'false'
            if NorData['group_id'] == NorData['iid']: # new cluster
                NorData['root'] = 'true'
                self.logger("new cluster generated! data:{}".format(NorData))

            NorData['gtime'] = int(time.time())
            if "group_id_online" in data:  ## used during version upgrades
                NorData['new_group_id'] = NorData['group_id']
                NorData['group_id']     = data['group_id_online']

            ## re-check the cache after a small random sleep to narrow the
            ## concurrent-write race window (known badcase)
            time.sleep(random.uniform(0.002, 0.008))
            ret = self.hit_cache(title=data['title'])
            if ret and 'group_id' in ret:
                self.logger("hit cache 2! title:{}\tret:{}".format(data['title'], ret))
                NorData['group_id'] = ret['group_id']
            else:
                ## write to cache (12h TTL)
                self.set_cache(title=data['title'], data={"group_id":NorData['group_id']},ex=43200)
            ret = self.search_client.insert(NorData)
            self.logger("do insert! data:{} ret:{}".format(NorData, ret))
            return NorData['group_id']
        except Exception as e:
            self.logger("GROUP_ID ERROR! - input data:{} mgs:{}".format(json.dumps(data, ensure_ascii=False),e))
        return iid

    @FpTool.cost
    def select_cluster_v1(self, query_data, cands_docs):
        """v1 cluster selection (uses distance_v1 / group_id_v1 fields).

        Picks a target cluster for *query_data* from threshold-filtered candidates.
        Returns (new_group_id, origin_dataid, reason); new_group_id is '' when no
        candidate passes any strategy.
        """
        def __select_strastegy(query, cand):
            # Decide whether *cand* is a plausible duplicate of *query* via a cascade
            # of strategies (jaccard, cosine, keywords, title-entity); records the
            # matching strategy name in the enclosing cand_reason dict.
            cosine_threshold  = query['cosine_threshold']
            jaccard_threshold = query['jaccard_threshold']
            keyword_threshold = query['keyword_threshold']
            self.logger("v1 query:{} - cand:{} select strastegy! {}\t{}".format(query['dataid'], cand['dataid'], cosine_threshold, jaccard_threshold))
            time_delta = abs(cand['ctime'] - query.get('ctime',time.time()))
            if time_delta > 259200:
                # Candidates older than 3 days must clear stricter thresholds.
                self.logger("v1 {} - {} - {} - {} - old news[3days] strastegy!  ".format(query['dataid'], query['title'],  cand['dataid'], cand['title']))
                cosine_threshold += 0.05
                jaccard_threshold += 0.25
                keyword_threshold += 0.15

            # jaccard strategy
            if (cand['jaccard'] >= jaccard_threshold and cand['distance_v1'] > 0.55) or cand['jaccard'] >= (jaccard_threshold + 0.15):
                cand_reason[cand['dataid']] = 'jaccard'
                self.logger("v1 {} - {} - {} - {} - match jaccard score strastegy! ".format(query['dataid'], query['title'],  cand['dataid'], cand['title']))
                return True

            # cosine strategy
            if cand['distance_v1'] >= cosine_threshold and cand['jaccard'] >= max(jaccard_threshold / 2, 0.15):
                cand_reason[cand['dataid']] = 'cosine'
                self.logger("v1 {} - {} - {} - {} - match cosine score strastegy! ".format(query['dataid'], query['title'],  cand['dataid'], cand['title']))
                return True

            if query.get('content_type') != -1: return False    ## spam content skips the keywords / entity strategies
            # keywords strategy
            keywords, cand_keywords = query.get('keywords',''), cand.get('keywords','')
            keywords_len, cand_keywords_len = 0, 0
            if keywords:        keywords_len = len(keywords.split())
            if cand_keywords:   cand_keywords_len = len(cand_keywords.split())
            is_valid = (keywords_len >= 4 and cand_keywords_len >= 4)  ## fewer than 4 keywords is too noisy to trust
            if is_valid and cand['keywords_score'] >= keyword_threshold and cand['distance_v1'] >= (cosine_threshold / 2):
                cand_reason[cand['dataid']] = 'keywords'
                self.logger("v1 {} - {} - {} - {} - match keywords score strastegy! ".format(query['dataid'], query['title'],  cand['dataid'], cand['title']))
                return True
            ## entity strategy: the two titles share many entity words
            title_entity, cand_entity = [], []
            if query.get('title_entity', ''):
                title_entity = query.get('title_entity', '').split()
            if cand.get('title_entity', ''):
                cand_entity = cand.get('title_entity', '').split()
            if any(title_entity) and any(cand_entity) and len(set(title_entity) & set(cand_entity)) > 2 and cand['distance_v1'] > 0.6 and cand['jaccard'] > 0.15:
                cand_reason[cand['dataid']] = 'title_entity'
                self.logger("v1 {} - {} - {} - {} -  match entity score strastegy! ".format(query['dataid'], query['title'],  cand['dataid'], cand['title']))
                return True

            return False
        new_group_id, origin, cand_reason = '', query_data['dataid'], {}

        groud_id_dataid = {doc.get('group_id_v1',doc['group_id']):doc['dataid'] for doc in cands_docs}
        _most_fp = Counter([x.get('group_id_v1',x['group_id']) for x in cands_docs]).most_common(1) ## most frequent cluster
        _cands_num = len(cands_docs)
        if _cands_num > 10 and (_most_fp[0][1] / _cands_num ) > 0.8:    ## one cluster covers >80% of the candidates: return it directly
            self.logger("v1 @@@@@@@ mostly group_id_v1 same return! {}\t{}".format(_most_fp[0], _cands_num))
            return _most_fp[0][0], groud_id_dataid[_most_fp[0][0]], 'knn'

        docs_filterd = list(filter(lambda x: __select_strastegy(query_data, x), cands_docs))  ## apply the threshold strategies
        if not docs_filterd:
            return new_group_id, origin, cand_reason.get(origin,'src')
        # debug logging
        for doc in docs_filterd:
            self.logger("v1 docs filterd {}\t{}\t{}\t{}\t{}".format(doc.get('classes', '其他'), doc.get('group_id_v1',doc['group_id']), doc['title'], doc['score'],cand_reason[doc['dataid']]))
        # Target selection: anything reaching here passed at least one strategy,
        # so it is usually safe to recall directly.
        docs_filterd = sorted(docs_filterd, key=lambda x:x['jaccard'], reverse=True)  ## sort by jaccard to prefer literal matches
        sorted_cluser_ids = Counter([x.get('group_id_v1',x['group_id']) for x in docs_filterd]).most_common(1)   ## most frequent cluster
        groud_id_dataid = {doc.get('group_id_v1',doc['group_id']):doc['dataid'] for doc in docs_filterd}
        # Pick one cluster from the surviving candidates.
        if sorted_cluser_ids[0][1] > 2:
            new_group_id = sorted_cluser_ids[0][0]
            origin = groud_id_dataid[new_group_id]
        elif sorted_cluser_ids[0][1] > 1 and (docs_filterd[0]['jaccard'] > query_data["jaccard_threshold"] - 0.15 and docs_filterd[0]['distance_v1'] > query_data["cosine_threshold"] - 0.15):
            new_group_id = sorted_cluser_ids[0][0]
            origin = groud_id_dataid[new_group_id]
        else:
            new_group_id = docs_filterd[0].get('group_id_v1',docs_filterd[0]['group_id'])
            origin = groud_id_dataid[new_group_id]
        return new_group_id, origin, cand_reason.get(origin,'src')

    @FpTool.cost
    def select_cluster(self, query_data, cands_docs):
        """Cluster selection (uses distance / group_id fields).

        Picks a target cluster for *query_data* from threshold-filtered candidates.
        Returns (new_group_id, origin_dataid, reason); new_group_id is '' when no
        candidate passes any strategy.
        """
        def __select_strastegy(query, cand):
            # Decide whether *cand* is a plausible duplicate of *query* via a cascade
            # of strategies (jaccard, cosine, keywords, title-entity); records the
            # matching strategy name in the enclosing cand_reason dict.
            cosine_threshold  = query['cosine_threshold']
            jaccard_threshold = query['jaccard_threshold']
            keyword_threshold = query['keyword_threshold']
            self.logger("query:{} - cand:{} select strastegy! {}\t{}".format(query['dataid'], cand['dataid'], cosine_threshold, jaccard_threshold))
            time_delta = abs(cand['ctime'] - query.get('ctime',time.time()))
            if time_delta > 259200:
                # Candidates older than 3 days must clear stricter thresholds.
                self.logger("{} - {} - {} - {} - old news[3days] strastegy!  ".format(query['dataid'], query['title'],  cand['dataid'], cand['title']))
                cosine_threshold += 0.05
                jaccard_threshold += 0.25
                keyword_threshold += 0.15

            # jaccard strategy
            if (cand['jaccard'] >= jaccard_threshold and cand['distance'] > 0.55) or cand['jaccard'] >= (jaccard_threshold + 0.15):
                cand_reason[cand['dataid']] = 'jaccard'
                self.logger("{} - {} - {} - {} - match jaccard score strastegy! ".format(query['dataid'], query['title'],  cand['dataid'], cand['title']))
                return True

            # cosine strategy
            if cand['distance'] >= cosine_threshold and cand['jaccard'] >= max(jaccard_threshold / 2, 0.15):
                cand_reason[cand['dataid']] = 'cosine'
                self.logger("{} - {} - {} - {} - match cosine score strastegy! ".format(query['dataid'], query['title'],  cand['dataid'], cand['title']))
                return True

            if query.get('content_type') != -1: return False    ## spam content skips the keywords / entity strategies
            # keywords strategy
            keywords, cand_keywords = query.get('keywords',''), cand.get('keywords','')
            keywords_len, cand_keywords_len = 0, 0
            if keywords:        keywords_len = len(keywords.split())
            if cand_keywords:   cand_keywords_len = len(cand_keywords.split())
            is_valid = (keywords_len >= 4 and cand_keywords_len >= 4)  ## fewer than 4 keywords is too noisy to trust
            if is_valid and cand['keywords_score'] >= keyword_threshold and cand['distance'] >= (cosine_threshold / 2):
                cand_reason[cand['dataid']] = 'keywords'
                self.logger("{} - {} - {} - {} - match keywords score strastegy! ".format(query['dataid'], query['title'],  cand['dataid'], cand['title']))
                return True
            ## entity strategy: the two titles share many entity words
            title_entity, cand_entity = [], []
            if query.get('title_entity', ''):
                title_entity = query.get('title_entity', '').split()
            if cand.get('title_entity', ''):
                cand_entity = cand.get('title_entity', '').split()
            if any(title_entity) and any(cand_entity) and len(set(title_entity) & set(cand_entity)) > 2 and cand['distance'] > 0.6 and cand['jaccard'] > 0.15:
                cand_reason[cand['dataid']] = 'title_entity'
                self.logger("{} - {} - {} - {} -  match entity score strastegy! ".format(query['dataid'], query['title'],  cand['dataid'], cand['title']))
                return True

            return False
        new_group_id, origin, cand_reason = '', query_data['dataid'], {}

        groud_id_dataid = {doc['group_id']:doc['dataid'] for doc in cands_docs}
        _most_fp = Counter([x['group_id'] for x in cands_docs]).most_common(1) ## most frequent cluster
        _cands_num = len(cands_docs)
        if _cands_num > 10 and (_most_fp[0][1] / _cands_num ) > 0.8:    ## one cluster covers >80% of the candidates: return it directly
            self.logger("@@@@@@@ mostly group_id same return! {}\t{}".format(_most_fp[0], _cands_num))
            return _most_fp[0][0], groud_id_dataid[_most_fp[0][0]], 'knn' 

        docs_filterd = list(filter(lambda x: __select_strastegy(query_data, x), cands_docs))  ## apply the threshold strategies
        if not docs_filterd:
            return new_group_id, origin, cand_reason.get(origin,'src')
        # debug logging
        for doc in docs_filterd:
            self.logger("docs filterd {}\t{}\t{}\t{}\t{}".format(doc.get('classes', '其他'), doc['group_id'], doc['title'], doc['score'],cand_reason[doc['dataid']]))
        # Target selection: anything reaching here passed at least one strategy,
        # so it is usually safe to recall directly.
        docs_filterd = sorted(docs_filterd, key=lambda x:x['jaccard'], reverse=True)  ## sort by jaccard to prefer literal matches
        sorted_cluser_ids = Counter([x['group_id'] for x in docs_filterd]).most_common(1)   ## most frequent cluster
        groud_id_dataid = {doc['group_id']:doc['dataid'] for doc in docs_filterd}
        # Pick one cluster from the surviving candidates.
        if sorted_cluser_ids[0][1] > 2:
            new_group_id = sorted_cluser_ids[0][0]
            origin = groud_id_dataid[new_group_id]
        elif sorted_cluser_ids[0][1] > 1 and (docs_filterd[0]['jaccard'] > query_data["jaccard_threshold"] - 0.15 and docs_filterd[0]['distance'] > query_data["cosine_threshold"] - 0.15):
            new_group_id = sorted_cluser_ids[0][0]
            origin = groud_id_dataid[new_group_id]
        else:
            new_group_id = docs_filterd[0]['group_id']
            origin = groud_id_dataid[new_group_id]   
        return new_group_id, origin, cand_reason.get(origin,'src')

    @FpTool.cost
    def content_fp(self, data, cands_docs):
        """Log the best keyword-similar candidate for *data* (diagnostic only, no return value).

        Skips spam content that has no keywords, and queries with fewer than 4
        keywords (too noisy to trust). Only the top-ranked candidate that itself
        has >= 4 keywords is examined.
        """
        if data.get('content_type') != -1 and not data.get('keywords',''): ## spam content without keywords skips the keywords strategy
            return
        keywords_len = len(data.get('keywords','').split()) if data.get('keywords','') else 0
        if keywords_len < 4:      ## too few keywords to be reliable
            return
        keywords_cands_docs = sorted(cands_docs,key=lambda x:x['keywords_score'], reverse=True) # sort by keyword similarity
        try:
            for kcands in keywords_cands_docs:
                ckeywords_len = len(kcands.get('keywords','').split()) if kcands.get('keywords','') else 0
                if ckeywords_len < 4:   continue
                if kcands['keywords_score'] > 0.399:
                    # FIX: log the candidate that was actually examined (kcands), not the raw
                    # top-scored doc, which may have been skipped for having < 4 keywords
                    self.logger("keywords! {}\t{}\t{}".format(data.get("dataid"), data.get("title"), json.dumps(kcands, ensure_ascii=False)))
                break
        except Exception as e:
            self.logger("content_fp error! msg:{}".format(e))

if __name__ == "__main__":
    # Manual smoke test: fetch a single article from mongo and run it through the pipeline.
    from utils.mongodb_handler_online import MongoHandlerOnline
    mh = MongoHandlerOnline()
    # NOTE: dataid is reassigned repeatedly; only the last uncommented value takes effect.
    dataid = "comos:kqcfnca2236970"
    dataid = "comos:ktzqtyu5254580"
    dataid = "comos:ktzscyy3573368"
    dataid = "comos:ktzscyy4265954"
    #dataid = "comos:ktzqtyu6202358"
    #dataid = "comos:ktzscyy4219721"
    dataid = "comos:ktzqtyu6440041"
    dataid = "comos:ktzscyy5653332"
    #dataid = "comos:ktzscyy3516515"
    #dataid = "comos:ktzqtyu8522496"
    dataid = "comos:mcwiwst3912684"
    #dataid = "tt:2309404750086998721105"
    nfp = NewsFingerPrint()
    ret = mh.findDocs(dataid=dataid)
    nfp.group_id(ret)
    exit()
    # Batch mode below never runs because of exit() above; remove exit() to process a dataid list.
    dataid_list = list(set([i.replace("dataid:","").strip() for i in open("dataid","r")]))
    #dataid_list = list(set([i.strip().split("\t")[0] for i in open("20211115_dataid", "r")])) # evaluation version
    #dataid_list = list(set([i.strip().split("\t")[0] for i in open("dataid_list_20211205", "r")]))
    #print("input length {} {}".format(len(dataid_cache), len(dataid_list)))
    #exit()
    for dataid in dataid_list:
        #if dataid in dataid_cache:     continue
        ret = mh.findDocs(dataid=dataid)
        if not ret: continue
        if 'title' not in ret:  continue
        if 'content' not in ret:        continue
        print("idataid - {}".format(dataid))
        #print(ret['dataid'])
        nfp.group_id(ret)
        #time.sleep(0.2)
        #break
