# coding:utf-8
#from LAC import LAC
from utils.embedding.embedding_client import SentenceEmbedding  # 新版 group_id
from utils.embedding.embedding_client_v1 import HttpClient
from utils.regulation import regulation, covid_regulation, AREA_RE, weather_regulation,SportsDetect, covid_num_regulation, olympic_regulation,is_xiangsheng
import time
import json
import jieba
import jieba.posseg as pseg
from utils.setting import LOG_PATH, ES_FIELDS, USER_DICT, USER_DICT_JIEBA
import requests
from utils.tool import FpTool
import re
import hashlib
#lac_client = LAC()
# Load the custom user dictionary (supplements NER vocabulary)
#lac_client.load_customization(USER_DICT, sep=None) ## 补充 ner
jieba.load_userdict(USER_DICT_JIEBA)

def word2vec(text, url='http://10.13.40.138:8060/embedding/'):
    '''Fetch Tencent-style 200-d embeddings for *text* from the embedding service.

    Args:
        text: sentence to embed.
        url: embedding service endpoint; parameterized (with the original
            hard-coded host as default) so other environments/tests can
            override it without editing this module.

    Returns:
        Tuple ``(sentence_embedding, tokens, word_embedding)`` taken from the
        service response's ``ret`` payload.

    Raises:
        requests.RequestException on network failure; KeyError/ValueError if
        the response does not have the expected JSON shape.
    '''
    headers = {'Content-Type': 'application/json', "connection": "close"}
    response = requests.post(url=url,
                             headers=headers,
                             data=json.dumps({"text": text}))
    ret = response.json()['ret']
    return ret['sentence_embedding'], ret['tokens'], ret['word_embedding']

class isHotNews(FpTool):
    """Decide whether a title matches one of the day's trending queries.

    Relies on ``FpTool`` for ``hot_redis_client`` (redis handle keyed by a
    date string such as ``20220401``) and ``jaccard_distance``.
    """

    def is_hot(self, title_split, _date, threshold=0.35):
        """Return True if *title_split* overlaps a top-3 hot query for *_date*.

        Args:
            title_split: tokenized title — a list of tokens or a
                whitespace-joined string.
            _date: date key, e.g. ``20220401``.
            threshold: minimum Jaccard similarity to count as hot.
        """
        if not title_split or not _date:
            return False
        # BUGFIX: isinstance() was called without a type argument (TypeError).
        if isinstance(title_split, str):
            title_split = title_split.split()
        hot_query = json.loads(self.hot_redis_client.get(_date))
        for query in hot_query[:3]:  # only the top-3 hottest queries
            qkeywords = query.get('keywords')
            # BUGFIX: original referenced the undefined name ``dkeywords``.
            if not qkeywords or not isinstance(qkeywords, list):
                continue
            if self.jaccard_distance(title_split, qkeywords) >= threshold:
                return True
        return False

    def __call__(self, title_split, _date, threshold=0.35):
        """Best-effort wrapper: any failure (missing redis key, bad JSON,
        connection error) is treated as "not hot"."""
        try:
            return self.is_hot(title_split, _date, threshold)
        except Exception:
            return False

class FeatureExtract():
    def __init__(self):
        """Load embedding clients, per-domain similarity thresholds, and the
        compiled regexes / helper detectors used by feature extraction."""
        self.embedding_client = SentenceEmbedding()
        self.embedding_client_v1 = HttpClient()
        self.threshold = json.load(open("data/classes_threshold.json", "r"))
        #self.idf_weights = json.load(open("data/tokens_weights.json", "r"))
        # NOTE: regex literals are raw strings now — '\d' in a plain literal
        # is an invalid escape (SyntaxWarning on modern Python); the compiled
        # patterns are unchanged.
        self.pnw = re.compile(r'《(.*?)》')  # titled works, e.g. 《流浪地球》
        self.pm = re.compile(r'\d+(?:\.\d+)?[%万千亿百]')  # finance figures (percent / 万 / 亿 ...)
        self.covid_nums = re.compile(r"\d+[例]")  # COVID case counts, "N例"
        self.covid_country = re.compile(r"^本土新增|国家卫健委|全国新增|31省|31个省")  # nation-wide report markers
        self.covid_int = re.compile(r"世卫组织|全球")  # international/global report markers
        self.covid_country_filter = re.compile(r"此地|我省|我市|国内一地")  # phrases implying a local, not national, report
        self.covid_filter = re.compile(r"新增|增加|增|\+|发现|\d例|\d人")  # "new cases" trigger words
        self.not_covid = re.compile(r"近期|近期报告|累计报告|近日|最近|本周|本月|\d+\%|本轮|次轮")  # cumulative/periodic summaries to exclude
        self.p_num = re.compile(r"\d+|一|二|三|四|五|六|七|八|九|十")  # arabic or Chinese numerals
        # district lists used to pick the reporting district inside major cities
        self.impcity = {
            "北京": re.compile('东城|西城|崇文|宣武|朝阳|海淀|丰台|石景山|通州|平谷|顺义|怀柔|昌平|门头沟|房山|大兴|密云|延庆'),
            "上海": re.compile('黄浦|徐汇|长宁|静安|普陀|虹口|杨浦|浦东|闵行|宝山|嘉定|金山|松江|青浦|奉贤|崇明'),
        }
        self.sDetectClient = SportsDetect(football_team_file="data/football_team.txt")
        self.is_hot_news = isHotNews()

    def nw_regulation(self, text):
        return re.findall(self.pnw, text)

    def m_regulation(self, text):
        return re.findall(self.pm, text)

    def covid_num_regulation(self, text):
        return re.findall(self.covid_nums, text)

    def data_normalized(self, data):
        """Validate the raw article payload and compute its feature set.

        *data* must be a dict containing at least ``title`` and ``content``;
        otherwise an AssertionError is raised.
        """
        assert isinstance(data, dict)
        assert 'title' in data and 'content' in data
        return self.__data_normalized(data)

    def __call__(self, data):
        """Make the extractor itself callable; delegates to data_normalized."""
        return self.data_normalized(data)

    def __embedding(self, text):
        try:
            _vector = self.embedding_client.inference(text)
        except Exception as e:
            # self.__logger("embedding error: {}".format(e))
            _vector = []
        return _vector

    def __embedding_v1(self, text):
        try:
            _vector = self.embedding_client_v1.inference([text])
        except Exception as e:
            # self.__logger("embedding error: {}".format(e))
            _vector = ''
        return _vector

    def __keywords(self, data):
        return []

    def __classes(self, data):
        return '其他'

    def __ner(self, text):
        """NER over *text* via the LAC client.

        NOTE(review): ``lac_client`` is commented out at module top, so
        calling this raises NameError; its only call site (inside
        ``__data_normalized``) is also commented out — dead code kept for
        when LAC is re-enabled.

        Maps LAC tags to jieba-style flags: PER->nr, ORG->nt, TIME->t,
        nz->nz, LOC->ns (administrative suffixes stripped via AREA_RE).
        Standalone date words (年/月/日/天) are dropped.
        """
        split_ret = lac_client.run(text.replace(" ", ""))
        _ner = {}
        # split_ret[0] is the token list, split_ret[1] the parallel tag list
        for word, flag in zip(split_ret[0], split_ret[1]):
            if word in ['年','月','日', '天']:	continue
            if flag == 'PER':
                _ner[word] = 'nr'
            elif flag == 'ORG':
                _ner[word] = 'nt'
            elif flag == 'TIME':
                _ner[word] = 't'
            elif flag == 'nz':
                _ner[word] = 'nz'
            elif flag == 'LOC':
                # strip administrative suffixes (省/市/区 ...) from place names
                word = re.sub(AREA_RE, '', word)
                _ner[word] = 'ns'
        return _ner
 
    def __split(self, title):
        """Tokenize *title* with jieba posseg.

        Returns:
            ``(tokens_postag, postags)`` — ``tokens_postag`` maps each kept
            token to its POS flag (single-char nr/ns/nt demoted to plain
            ``n``; titled works from 《》 tagged ``nw``); ``postags`` is the
            raw flag sequence of every token, in order.
        """
        tokens_postag = {}
        postags = []
        for word, flag in pseg.cut(title.replace(" ", "")):
            if flag == 'ns':
                # strip administrative suffixes (省/市/区 ...) from place names
                word = re.sub(AREA_RE, '', word)
            postags.append(flag)
            # keep multi-char words plus any noun-ish ('n') or numeral ('m') token
            if not (len(word) > 1 or 'n' in flag or 'm' in flag):
                continue
            if len(word) == 1 and flag in ["nr", "ns", "nt"]:
                flag = 'n'
            if word in ['年', '月', '日', '天']:
                continue
            tokens_postag[word] = flag
        # tag titled works extracted from 《》
        # BUGFIX: removed the redundant conditional that assigned the same
        # value immediately before an unconditional identical assignment.
        for nw in self.nw_regulation(title):
            tokens_postag[nw] = 'nw'
        return tokens_postag, postags

    def country_covid(self, text, ctime,isForce=False):
        """Fingerprint nation-wide COVID daily-case reports.

        Returns a hash-based fingerprint (``c_…`` for reports before 16:00
        local time, ``c1_…`` after) shared by every article reporting the
        same day's national ("国内") or global ("国际") numbers, or "" when
        *text* is not a nation-wide report. ``isForce`` treats the text as
        nation-wide even without an explicit marker.
        """
        all_country = re.findall(self.covid_country, text)
        is_all_country = False | isForce
        flag = "国内"
        if all_country:
            if re.findall(self.covid_int, text):
                flag = '国际'
            pre_text = text.split(all_country[0])[0]   # the clause before the report marker
            if pre_text:
                tokens_postag, postags = self.__split(pre_text)
                # a place name before the marker means it is a regional, not national, report
                if 'ns' not in postags and not re.findall(self.covid_country_filter, pre_text):
                    is_all_country = True
            else:
                is_all_country = True

        if not is_all_country:  return ""
        # national / global daily numbers: key on scope + date
        hword = flag + time.strftime("%Y-%m-%d", time.localtime(ctime))
        # reports after 16:00 local time count as "today's" numbers
        # (c1_ vs c_ prefix disambiguates today/yesterday confusion)
        if ctime > int(time.mktime(time.strptime(time.strftime("%Y-%m-%d", time.localtime(ctime)), "%Y-%m-%d"))) + 57600: 
            return "c1_{}".format(hashlib.md5(hword.encode("utf-8")).hexdigest()[:5])
        return "c_{}".format(hashlib.md5(hword.encode("utf-8")).hexdigest()[:5])

    def covid_news_fp(self, text, ctime):
        '''Special fingerprint for COVID daily-case-report articles.

        Articles reporting the same region's (or the nation's) case numbers
        on the same day collapse onto one fingerprint so duplicates are
        caught. Returns "" when *text* does not look like a daily report.
        '''
        def fingerprint(hword):
            # reports after 16:00 local time count as "today's" numbers,
            # earlier ones as "yesterday's" (c1_ vs c_ prefix) —
            # disambiguates today/yesterday confusion
            midnight = int(time.mktime(time.strptime(
                time.strftime("%Y-%m-%d", time.localtime(ctime)), "%Y-%m-%d")))
            prefix = "c1" if ctime > midnight + 57600 else "c"
            return "{}_{}".format(prefix, hashlib.md5(hword.encode("utf-8")).hexdigest()[:5])

        def select_location(text):
            # pick the reporting region: the place name(s) appearing before
            # the "new cases" trigger word
            try:
                k = re.findall(self.covid_filter, text)[0]
                prefix_token, _ = self.__split(text.split(k)[0])
                prefix_ns = [word for word in prefix_token if prefix_token[word] == "ns"]
                if not prefix_ns:
                    return ""
                if len(prefix_ns) == 2:  # two regions: take the more specific one, e.g. "北京昌平新增" -> 昌平
                    return prefix_ns[1]
                if len(prefix_ns) > 2:   # e.g. "北京朝阳区建外街道…" — fall back to known district lists
                    if '北京' in prefix_ns:
                        region = re.findall(self.impcity['北京'], text.split(k)[0])
                        if any(region) and len(region) == 1:
                            return region[0]
                    if '上海' in prefix_ns:
                        # BUGFIX: original looked up the 北京 district regex here
                        region = re.findall(self.impcity['上海'], text.split(k)[0])
                        if any(region) and len(region) == 1:
                            return region[0]
                return prefix_ns[0]
            except Exception as e:
                print(e)
            return ""

        # quick rejects: must contain a trigger word, a case count and a
        # numeral, and must not be a cumulative / periodic summary
        if not self.covid_filter.search(text):
            return ""
        if not covid_num_regulation(text):
            return ""
        if not re.findall(self.p_num, text):
            return ""
        if self.not_covid.search(text):
            return ""
        if text and text.startswith("中国发布"):
            text = text.replace("中国发布", "")
        try:
            ctime = int(ctime)
        except:
            ctime = int(time.time())

        tokens_postag, postags = self.__split(text)
        # nation-wide report?
        all_country = self.country_covid(text, ctime)
        if all_country:
            return all_country
        if 'ns' not in postags:  # no place name at all → treat as nation-wide
            all_country = self.country_covid(text, ctime, isForce=True)
            if all_country:
                return all_country
        ns_words = [word for word in tokens_postag if tokens_postag[word] == "ns"]
        nums = self.covid_num_regulation(text)
        if not ns_words and not nums:
            return ""
        # prefer the region name as the fingerprint key
        if ns_words:
            _location = select_location(text)
            hword = _location if _location else ns_words[0]
            hword += time.strftime("%Y-%m-%d", time.localtime(ctime))
            print(hword)
            return fingerprint(hword)
        # otherwise fall back to the case counts (sorted for consistency)
        hword = sorted(nums)[0] + time.strftime("%Y-%m-%d", time.localtime(ctime))
        print(hword)
        return fingerprint(hword)

    def __data_normalized(self, data):
        '''Compute dedup/recall features for one article and return them.

        Builds ``properties`` with identity fields, ``classes``/``keywords``,
        ``title``/``content``, two title embeddings, the three similarity
        thresholds (adjusted by a cascade of domain-specific rules),
        ``title_entity``/``title_split``/``word_filter`` for hard filtering,
        and ``time_from`` (recall window lower bound).
        '''
        properties = {}
        # article identity fields, copied through when present and truthy
        for key in ['uuid', 'iid', 'url', 'ctime', 'dataid', 'pkey', 'outlook', 'docType']:
            _tmp = data.get(key)
            if not _tmp: continue
            properties[key] = _tmp
        if 'dataid' not in properties:
            properties['dataid'] = 'None'
        slot = int(time.time() - 86400 * 30)  # recall window: 30 days back by default
        try:
            ctime = int(data.get('ctime', time.time()))
        except:
            # BUGFIX: original read the unbound local ``ctime`` here
            # (NameError) — fall back to "now" when ctime is not numeric
            ctime = int(time.time())
        properties['ctime'] = ctime
        # domain feature
        _classes = data.get("classes", self.__classes(data))
        if _classes and isinstance(_classes, dict):
            properties['classes'] = list(_classes.keys())[0]
        if _classes and isinstance(_classes, str):
            properties['classes'] = _classes
        # BUGFIX: guarantee a class so the threshold lookups below cannot KeyError
        if 'classes' not in properties:
            properties['classes'] = '其他'
        # keyword feature
        _keywords = data.get("keywords", self.__keywords(data))
        if _keywords and isinstance(_keywords, list):
            properties['keywords'] = " ".join([r['word'] for r in _keywords])
        # title + content; only the first 256 chars of the body are kept
        properties['title'] = data.get('title')
        properties['content'] = data.get('content', '').strip()[:256]

        # title embedding (vector recall)
        if "embedding" not in data:
            properties["embedding"] = self.__embedding(data['title'])
        else:
            properties["embedding"] = data['embedding']

        # v1 title embedding (vector recall)
        if "embedding_v1" not in data:
            properties["embedding_v1"] = self.__embedding_v1(data['title'])
        else:
            properties["embedding_v1"] = data["embedding_v1"] if isinstance(data["embedding_v1"], str) else ""

        # per-domain similarity thresholds
        properties['cosine_threshold'] = self.threshold['cosine_threshold'].get(properties['classes'], 0.8)
        properties['jaccard_threshold'] = self.threshold['jaccard_threshold'].get(properties['classes'], 0.25)
        properties['keyword_threshold'] = 0.45
        if properties['classes'] in ['娱乐', '社会', '财经', '科技', '军事', '时政', '国际', '体育', '教育', '历史']:  # major domains
            properties['content_type'] = -1
        # fixed-format articles: push every threshold to the maximum
        if regulation(properties['title']):
            properties['jaccard_threshold'] = 0.85
            properties['cosine_threshold'] = 0.85
            properties['keyword_threshold'] = 0.85
            properties['content_type'] = 0
        # weather articles: special thresholds, one-day recall window
        if weather_regulation(properties['title']) or properties['classes'] == '天气':
            properties['jaccard_threshold'] = 0.65
            properties['cosine_threshold'] = 0.85
            properties['keyword_threshold'] = 0.85
            properties['classes'] = '天气'
            slot = self.__day_search(ctime)
            properties['content_type'] = 1
        # tokenize the title
        tokens_postag, postags = self.__split(data.get('title'))
        # COVID articles: 2-day window, plus 'ns:<region>' entries in keywords
        if properties['classes'] == '新冠肺炎':
            slot = int(time.time() - 86400 * 2)
            covid_area = [area for area, flag in tokens_postag.items() if flag == 'ns'] if isinstance(tokens_postag, dict) else []
            if covid_area and not properties.get('keywords'):
                properties['keywords'] = ''
            for area in covid_area:
                properties['keywords'] += ' ' + 'ns:' + area
        #tokens_ner = self.__ner(data.get('title'))
        #properties["title_ner"] = " ".join(list(tokens_ner.keys()))
        # hot-news flag against today's trending queries
        strdate = time.strftime("%Y%m%d", time.localtime(ctime))
        properties['isHotNews'] = self.is_hot_news(list(tokens_postag.keys()), strdate)
        # entity words: person/place/org/number/work/proper-noun/time
        _title_entity = {tk: tokens_postag[tk] for tk in tokens_postag if tokens_postag[tk] in ['nr', 'ns', 'nt', 'm', 'nw', 'nz', 't']}
        #_title_entity.update(tokens_ner)
        # per-domain hard entity filter
        word_filter = self.ner_filter(properties['classes'], _title_entity)
        if word_filter:
            properties['word_filter'] = word_filter
        # sports matches: hard window from midnight (results are same-day),
        # team/score tokens become entities and filters
        if self.sDetectClient.detect(data.get('title')) and properties['classes'] == '体育':
            slot = self.__day_search(ctime, ishard=True)
            if data.get('outlook', '') in self.sDetectClient.media_white_list:
                properties['jaccard_threshold'] += 0.1
            sports_query_info = self.sDetectClient.extract_info(data.get('title'))
            properties['sport_match'] = sports_query_info
            match_score = sports_query_info.get('score', [])
            if match_score:
                _title_entity.update(dict.fromkeys(match_score, 'sc'))
            match_team = sports_query_info.get('team', [])
            if match_team:
                _title_entity.update(dict.fromkeys(match_team, 'st'))
            if match_score + match_team:
                if 'word_filter' not in properties:
                    properties['word_filter'] = match_score + match_team
                else:
                    properties['word_filter'].extend(match_score + match_team)
        # Winter Olympics: extract sport / athlete names
        if properties['classes'] == '体育':
            sports, athlete = olympic_regulation(data.get('title'))
            if sports:
                properties['sports'] = sports
            if athlete:
                properties['athlete'] = athlete

        # COVID articles: looser thresholds, one-day window, region filter
        if covid_regulation(properties['title']):
            properties['jaccard_threshold'] = 0.35
            properties['cosine_threshold'] = 0.81
            properties['keyword_threshold'] = 0.45
            slot = self.__day_search(ctime)
            _tmp = [tk for tk in _title_entity if _title_entity[tk] == 'ns']
            if 'word_filter' not in properties:
                properties['word_filter'] = _tmp
            else:
                properties['word_filter'].extend(_tmp)
        if properties['classes'] == '财经':  # finance: differing percentages/amounts must not dedup
            _m = self.m_regulation(data.get('title'))
            if _m:
                _title_entity.update(dict.fromkeys(_m, 'mm'))
                # BUGFIX: original executed both branches, so a freshly
                # created word_filter was extended with _m a second time
                # (duplicated entries)
                if 'word_filter' not in properties:
                    properties['word_filter'] = list(_m)
                else:
                    properties['word_filter'].extend(_m)
        properties["title_entity"] = " ".join(_title_entity)
        # entity-free content outside 社会/时政: tighten all thresholds
        if not properties["title_entity"] and properties["classes"] not in ['社会', '时政']:
            properties['jaccard_threshold'] += 0.1
            properties['cosine_threshold'] += 0.1
            properties['keyword_threshold'] += 0.1
        # movie/show clips: title begins with a 《work》
        # BUGFIX: guard postags — an empty title would IndexError here
        if postags and postags[0] == 'nw' and properties["classes"] in ['娱乐', '音乐', "旅游", "影视", "摄影", "舞蹈", "动漫"]:
            properties['jaccard_threshold'] = 0.75
            properties['cosine_threshold'] = 0.85
            properties['keyword_threshold'] = 0.65
            properties['content_type'] = 2
        # crosstalk (xiangsheng) recordings
        if is_xiangsheng(properties["title"]):
            properties['jaccard_threshold'] = 0.45
            properties['cosine_threshold'] = 0.83
            properties['keyword_threshold'] = 0.45
            properties['content_type'] = 2
        if properties['classes'] in ['影视', '动漫']:
            properties['content_type'] = 3
        # very short / entity-free titles: be much stricter
        if len(tokens_postag.keys()) <= 3 and not properties["title_entity"]:
            properties['jaccard_threshold'] *= 2
        if len(set(tokens_postag.keys())) <= 1:
            properties['jaccard_threshold'] = 1.0
        properties['jaccard_threshold'] = min(properties['jaccard_threshold'], 0.8)
        properties['cosine_threshold'] = min(properties['cosine_threshold'], 0.9)
        properties["title_split"] = " ".join(list(tokens_postag.keys()))
        # Toutiao-sourced articles get fixed thresholds
        if properties['dataid'].startswith("tt:"):
            properties['jaccard_threshold'] = 0.55
            properties['cosine_threshold'] = 0.85
            properties['content_type'] = 4

        # explicit recall-window lower bound, when provided and in the past
        if 'russia_time_from' in data:
            try:
                _ftime = int(data['russia_time_from'])
                if _ftime < int(time.time()):
                    slot = _ftime
            except:
                pass
        properties["time_from"] = slot
        return properties

    def modify_classes(self, query_data, cands_classes):
        """Switch the query's domain to *cands_classes* and recompute its
        features; returns the query untouched when the candidate class is
        empty or already matches."""
        same_class = query_data['classes'] == cands_classes
        if same_class or not cands_classes:
            return query_data
        query_data['classes'] = cands_classes
        return self.__data_normalized(query_data)

    def __day_search(self, ctime, ishard=False):
        slot = ctime - (ctime - time.timezone) % 86400
        if ishard:	return slot
        if int(time.time()) - slot < 5*3600:  # 凌晨五点之前 往前查一天
            slot = int(time.time()) - 86400
        return slot

    def ner_filter(self, classes, tokens_postag):
        """Return the entity words used as hard dedup filters for *classes*.

        Each domain constrains on a specific POS-tag set (people/works for
        entertainment, places for real-estate/COVID, works for media
        domains, ...); unknown domains get no filter. Token order follows
        *tokens_postag*'s insertion order, matching the original if-chain.
        """
        tag_rules = {
            '娱乐': ('nr', 'nw'),            # person names + titled works
            '社会': ('nr', 'ns', 'nt'),      # person / place / org
            '教育': ('nr', 'ns', 'nt'),
            '时政': ('ns',),                  # place-only (avoids COVID false hits)
            '健康': ('ns',),
            '天气': ('ns', 't'),              # place + time
            '财经': ('nt', 'nr', 'ns'),       # auto-generated fund articles
            '房产': ('ns',),
            '汽车': ('nz',),
            '星座': ('t',),
            '美食': ('nf',),
            '新冠肺炎': ('ns',),              # COVID region restriction
            '音乐': ('nw',),
            '旅游': ('nw',),
            '影视': ('nw',),
            '摄影': ('nw',),
            '舞蹈': ('nw',),
            '动漫': ('nw',),
            '游戏': ('nw',),
        }
        allowed = tag_rules.get(classes)
        if allowed is None:
            return []
        return [tk for tk, tag in tokens_postag.items() if tag in allowed]




if __name__ == "__main__":
    data = {"dataid": "comos:ktzscyy1895547", 
        "content": "鹿晗晒车内短发侧颜照 着牛仔衬衫帅气清爽少年感十足", 
        "iid": "f0sfF", 
        "ctime": 1635239719, 
        "title": "组图：鹿晗晒车内短发侧颜照 着牛仔衬衫帅气清爽少年感十足", 
        "classes":{"娱乐":0.9},
        "uuid":"9090"
        }

    features_extract = FeatureExtract()
    print(features_extract.nw_regulation(data['title']))
    st1 = time.time()
    ret = features_extract.data_normalized(data)
    st2 = time.time()
    print(ret, 1000*(st2-st1)) 

    st1 = time.time()
    ret = features_extract(data)
    st2 = time.time()
    print(ret, 1000*(st2-st1)) 
