import json

from elasticsearch import Elasticsearch, helpers
from tqdm import tqdm
import time
import re
import unicodedata
import jieba


class ElasticsearchHandler():
    """Thin Elasticsearch client wrapper used for de-duplication lookups."""

    def __init__(self):
        # Hardcoded cluster endpoints (Tianyi environment).
        self.tianyi_ip = ['10.23.2.21', '10.23.2.22', '10.23.2.23']
        self.tianyi_port = '9200'
        self.tianyi_index = 'simba_online'
        hosts = []
        for addr in self.tianyi_ip:
            hosts.append({'host': addr, 'port': self.tianyi_port})
        self.server = hosts
        self.index = self.tianyi_index
        print(self.server, self.index)
        # Shared Elasticsearch client instance.
        self.es = Elasticsearch(hosts=self.server)

    def lsearch(self, query):
        """Scrolled scan for large result sets; returns a lazy hit generator."""
        return helpers.scan(client=self.es, index=self.index, query=query, scroll='5m')

    def search(self, query):
        """Plain search; returns only the raw hit list."""
        resp = self.es.search(index=self.index, body=query, preference='primary_first')
        return resp['hits']['hits']

    def count(self, query):
        """Document count for *query*.

        NOTE(review): queries index 'new_sim_fp_online', not self.index —
        looks intentional but confirm against callers.
        """
        resp = self.es.count(index='new_sim_fp_online', body=query, preference='primary_first')
        return resp.get('count', 0)


def remove_punctuation(line, strip_all=True):
    """Strip punctuation from *line* and trim surrounding whitespace.

    With ``strip_all=True`` (default) keep only ASCII letters/digits and CJK
    ideographs (U+4E00..U+9FA5) — everything else, including spaces, is
    dropped. With ``strip_all=False`` remove only a fixed set of full-width /
    typographic punctuation marks; ASCII punctuation is kept.
    """
    if strip_all:
        cleaned = re.sub(r"[^a-zA-Z0-9\u4e00-\u9fa5]", '', line)
    else:
        punctuation = """！？｡＂＃＄％＆＇（）＊＋－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘'‛“”„‟…‧﹏"""
        cleaned = re.sub("[{}]+".format(punctuation), "", line)
    return cleaned.strip()


def findLength(A, B) -> int:
    """Length of the longest common substring of A and B.

    Both inputs are first normalized with ``remove_punctuation`` (strip_all
    mode), so only letters, digits and CJK characters participate in the
    match. Classic O(len(A) * len(B)) dynamic programming with a rolling row.
    """
    A = remove_punctuation(A)
    B = remove_punctuation(B)

    best = 0
    width = len(B) + 1
    prev = [0] * width  # prev[j]: longest common suffix ending at A[i-1], B[j-1]
    for ch_a in A:
        cur = [0] * width
        for j, ch_b in enumerate(B, 1):
            if ch_a == ch_b:
                run = prev[j - 1] + 1
                cur[j] = run
                if run > best:
                    best = run
        prev = cur
    return best


# Matches boilerplate markers "原标题" ("original title") / "来源" ("source").
# Unused in this chunk — presumably used by callers elsewhere; TODO confirm.
pat = re.compile(r'原标题|来源')


class Data:
    """Samples news docs from ES and builds (anchor, similar, dissimilar) title triples.

    Workflow: ``getdata()`` dumps near-unique titles per class to
    ``alltestdata.txt``; ``data()`` reads them back and, for each title,
    searches ES for a "similar" and a "dissimilar" candidate, appending the
    tab-separated triple to ``testdata.txt``.
    """

    def __init__(self):
        self.es = ElasticsearchHandler()
        # Class whitelist; '搞笑' (comedy) is deliberately excluded.
        self.cls = {'科技', '职场', '房产', '情感', '国际', '体育', '游戏', '美食', '汽车', '三农', '时尚', '娱乐', '育儿', '动漫', '科学',
                    '旅游', '文化', '动物', '天气', '教育', '时政', '军事', '数码', '摄影', '健康', '财经', '社会', '家居', '星座', '历史'}

    def Tversky(self, list_1, list_2):
        """Set-overlap ratio of two iterables (Jaccard; Tversky with alpha=beta=1).

        Callers pass strings, which are treated as character sets.
        Returns 0.0 when either input is empty.
        """
        if not list_1 or not list_2:
            return 0.0
        s1, s2 = set(list_1), set(list_2)
        inter = float(len(s1 & s2))       # intersection size
        only_1 = float(len(s1 - s2))      # in s1 but not s2
        only_2 = float(len(s2 - s1))      # in s2 but not s1
        # When inter == 0 the quotient would be 0 anyway (denominator >= 1
        # because s1 is non-empty); short-circuit kept for original behavior.
        return inter / (inter + only_1 + only_2) if inter else 0

    def getdata(self, classes=None):
        """Append up to ~2000 near-unique titles per class to alltestdata.txt.

        Fix: the output file is now closed via ``with`` (it previously leaked),
        and the loop variable no longer shadows the ``classes`` parameter
        (which was — and remains — effectively unused; kept for compatibility).
        """
        with open('../data/alltestdata.txt', 'a+', encoding='utf-8') as resd:
            for idx, cls_name in enumerate(self.cls):
                count = 2000
                query = {
                    '_source': ['dataid', 'title', 'supervise_topic', 'labels', 'title_fp', 'content_fp', 'group_id',
                                'classes_v2', 'sub_classes_v2'],
                    "query": {"bool": {"must": [
                        {"match": {"toutiaoIdx": 1}},
                        {"match": {"docType": 'news'}},
                        {"term": {'classes_v2': cls_name}},
                        {'range': {'ctime': {'gte': int(time.time() - 86400 * 30)}}},
                    ]}},
                    "aggs": {"uid_aggs": {"cardinality": {"field": "group_id"}}}
                }
                res = self.es.lsearch(query=query)
                tmp_count = 0
                cache = []  # titles already emitted for this class
                for i in tqdm(res, desc=f'{idx}:{cls_name}'):
                    line = i.get('_source', {})
                    title = line.get('title', '')
                    # Skip near-duplicates: > 0.2 character overlap with any kept title.
                    if any(self.Tversky(ot, title) > 0.2 for ot in cache):
                        continue
                    cache.append(title)
                    resd.write(json.dumps(line, ensure_ascii=False) + '\n')
                    tmp_count += 1
                    if tmp_count > count:
                        break

    def data(self):
        """Build (title, similar, dissimilar) triples and append them to testdata.txt.

        Fixes: both files are now managed with ``with`` (previously the input
        leaked and the output was re-opened on every loop iteration), the
        loop-invariant group-id lookup is hoisted out of the hit loop, and the
        always-true ``len(lres) == 3`` guard was removed (three items are
        appended unconditionally).
        """
        with open('../data/alltestdata.txt', 'r', encoding='utf-8') as resd, \
                open('../data/testdata.txt', 'a+', encoding='utf-8') as saved:
            for raw in resd:
                line = json.loads(raw.strip())
                title = line.get('title')
                # Takes 2 chars starting at offset 2 — presumably classes_v2 is
                # serialized like "['科技']"; TODO confirm against the index schema.
                classes = str(line.get('classes_v2'))[2:4]
                supervise_topic = list(line.get('supervise_topic', []))
                labels = list(line.get('labels', []))
                query = {'size': 60,
                         '_source': ['dataid', 'title', 'supervise_topic', 'labels', 'title_fp', 'content_fp', 'group_id',
                                     'classes_v2', 'sub_classes_v2'],
                         "query": {"bool": {"must": [
                             {"match": {"docType": 'news'}},
                             {"match": {"title": title}},
                             {"term": {'classes_v2': classes}},
                             {'bool': {'should': [
                                 {"terms": {'supervise_topic': supervise_topic}},
                                 {"terms": {'labels': labels}},
                             ]}},
                             {'range': {'ctime': {'gte': int(time.time() - 86400 * 30 * 5)}}},
                         ]}},
                         }
                res = self.es.search(query=query)
                gid = line.get('group_id')  # invariant across hits
                same = ''
                dis = ''
                b1, b2 = float('inf'), 0  # best dissimilar (min) / similar (max) scores
                for j in res:
                    rline = j.get('_source', {})
                    rgid = rline.get('group_id')
                    rt = rline.get('title', '')
                    score = self.Tversky(title, rt)
                    # "Similar": highest score strictly inside the (0.3, 0.7) band.
                    if score > b2 and 0.3 < score < 0.7:
                        same = rt
                        b2 = score
                    # "Dissimilar": lowest score from a different group.
                    if gid != rgid and score < b1:
                        dis = rt
                        b1 = score
                lres = [title, same, dis]
                print(lres)
                saved.write('\t'.join(lres) + '\n')


if __name__ == '__main__':
    # Entry point: builds test triples from an existing alltestdata.txt.
    # Run data.getdata() first (currently commented out) to produce it.
    # test()
    data = Data()
    # data.getdata()
    data.data()
    # data.test()
    # cache = set()
    # resdata = open('new_seed1.txt', 'a+', encoding='utf-8')
    # for i in tqdm(data.getdata()):
    #     gid = i.get('_source').get('group_id')
    #     did = i.get('_id')
    #     if gid in cache: continue
    #     cache.add(gid)
    #     line = {'ori': i.get('_source')}
    #     resdata.write(json.dumps(line, ensure_ascii=False) + '\n')
