# -*- coding: utf-8 -*-
"""
@Author: 张晟烨
@Date: 2022/08/03
@Email: zhangsy@zylliondata.com
@Description:  某单个搜索词的去重结果
@Version: Python3.8
@Modified By:

"""
import csv
import json
import re
import time
from itertools import combinations
from urllib.parse import urlparse

import jieba.analyse
import requests
from dateparser import parse

# import config
from simhash_core import Simhash, Index


def collect_data(keyword, url=None, timeout=30):
    """
    Query the search API and return up to 500 results for one keyword.

    :param keyword: search term, sent as ``searchWords``
    :param url: search endpoint; when None, falls back to
        ``config.Config.ES_HOST``.  (The original body referenced
        ``config`` while its import was commented out at the top of the
        file, which raised NameError at call time — the import is now
        done lazily here.)
    :param timeout: request timeout in seconds (the original had none,
        so a hung server would block forever)
    :return: decoded JSON response body as a dict
    """
    if url is None:
        # Lazy import so the module stays importable without `config`.
        import config
        url = config.Config.ES_HOST
    payload = json.dumps({
        "type": "pc",
        "mime": "HTML",
        "page": 1,
        "size": 500,
        "searchWords": keyword,
        "selectedIndustry": "",
        "selectedTopic": "",
        "selectedArea": ""
    })
    headers = {
        'Content-Type': 'application/json'
    }

    response = requests.request("POST", url, headers=headers, data=payload,
                                timeout=timeout)
    # Equivalent to json.loads(response.text) but respects the response
    # encoding handling of requests.
    return response.json()


def processing(dict_list):
    """
    Project each raw search hit onto the fields the pipeline needs.

    :param dict_list: list of raw result dicts from the search API
    :return: list of dicts carrying a positional id (``id_du``) plus the
        title, description, target URL, release date and es ``_id``
    """
    projected = []
    for position, record in enumerate(dict_list):
        projected.append({
            'id_du': position,
            'title': record['title'],
            'description': record['description'],
            'target': record['target'],
            'releaseAt': record['releaseAt'],
            'es_id': record['_id'],
        })
    return projected


def get_features(content):
    """
    Turn a string into TF-IDF-weighted keyword features via jieba.

    More accurate than the n-gram variant but much slower; used only by
    the 'complex' feature mode.

    :param content: raw text
    :return: list of (word, weight) tuples ordered by the word's first
        occurrence in the segmented text
    """
    # Original used content.strip("") which strips nothing at all.
    content = content.strip()
    seg = list(jieba.cut(content))
    # Top-10 keywords by TF-IDF over the segmented text.
    # (Debug prints from the original removed — library code should not print.)
    key_words = jieba.analyse.extract_tags("|".join(seg), topK=10,
                                           withWeight=True, allowPOS=())
    ranked = []
    for word, weight in key_words:
        try:
            position = seg.index(word)
        except ValueError:
            # extract_tags can emit a token that is not present verbatim
            # in seg; push unknown positions to the end instead of crashing.
            position = len(seg)
        ranked.append((word, weight, position))
    # Restore original text order so the simhash sees a stable sequence.
    ranked.sort(key=lambda item: item[2])
    return [(word, weight) for word, weight, _ in ranked]


def get_another_features(content):
    """
    Cheap character n-gram features — no segmentation, very fast.

    Works best on reasonably long text; very short inputs give extremely
    unstable hashes.

    :param content: raw text
    :return: list of 3-character shingles (always at least one entry)
    """
    width = 3
    cleaned = re.sub(r'[^\w]+', '', content)
    # Guarantee at least one window even when the cleaned text is
    # shorter than `width`.
    window_count = max(len(cleaned) - width + 1, 1)
    return [cleaned[start:start + width] for start in range(window_count)]


def process_title(title):
    """
    Strip the trailing site/source segment from a news title.

    Titles commonly end with "..._站名" or "...-站名"; everything before
    the last separator is kept.  A title containing no separator is now
    returned unchanged — the original collapsed such titles to '' because
    ``parts[:-1]`` on a one-element list is empty.

    :param title: raw title string
    :return: cleaned title, stripped of surrounding whitespace
    """
    parts = re.split('_|-', title)
    if len(parts) == 1:
        # No separator at all: keep the whole title instead of dropping it.
        return title.strip()
    return ''.join(parts[:-1]).strip()


def _pick_duplicate_id(dic1, dic2, id1, id2):
    """Choose which member of a duplicate pair to drop.

    Returns ``id1`` when dic1's releaseAt is not later than dic2's (or
    when either date fails to parse), otherwise ``id2`` — same decision
    the original made inline, twice.
    """
    try:
        delta_days = (parse(dic1['releaseAt']) - parse(dic2['releaseAt'])).days
    except Exception:  # was a bare except:; narrowed but still best-effort
        delta_days = 0
    return id1 if delta_days <= 0 else id2


def deduplicate(target_dictionary, feature_method='simple', k_value=3):
    """
    Detect near-duplicate documents via simhash and pick copies to drop.

    :param target_dictionary: raw result dicts (see processing())
    :param feature_method: 'simple' -> character n-grams, 'complex' ->
        jieba TF-IDF keywords as simhash features
    :param k_value: maximum Hamming distance treated as "near duplicate"
    :return: list of es_id values that should be removed
    :raises TypeError: for any other feature_method value
    """
    get_data_start = time.time()
    data = processing(target_dictionary)
    get_data_end = time.time()
    print("处理字典花费时间为:", get_data_end - get_data_start)

    # Hash each record; the title has its trailing site segment stripped
    # first so "same article, different site" still collides.
    if feature_method == 'simple':
        sim_list = [(i['es_id'],
                     Simhash(get_another_features(process_title(i['title']) + i['description'])))
                    for i in data]
    elif feature_method == 'complex':
        sim_list = [(i['es_id'],
                     Simhash(get_features(process_title(i['title']) + i['description'])))
                    for i in data]
    else:
        raise TypeError('没有第三条路')
    processing_data_end = time.time()

    # Collect buckets of near-duplicate ids (<= k_value differing bits).
    # Renamed from `index`, which the original later clobbered with an id.
    sim_index = Index(sim_list, k=k_value)
    duplicated_list = []
    for _es_id, sim_hash in sim_list:
        bucket = sim_index.get_near_dups(sim_hash)
        if len(bucket) >= 2 and bucket not in duplicated_list:
            duplicated_list.append(bucket)

    # Flatten each bucket into unique unordered id pairs.
    binary_list = []
    for bucket in duplicated_list:
        for pair in combinations(bucket, 2):
            if pair not in binary_list:
                binary_list.append(pair)

    id_list = []
    case_list = []  # annotated records; kept for inspection/debugging
    # NOTE(review): the pair ids come from Index.get_near_dups over
    # es_id-keyed hashes, yet are used as int() positions below — this
    # assumes simhash_core returns numeric ids; confirm against that module.
    for first_id, second_id in binary_list:
        dic1 = target_dictionary[int(first_id)]
        dic2 = target_dictionary[int(second_id)]

        if dic1['title'].replace(' ', '') == dic2['title'].replace(' ', ''):
            # Identical titles (spaces ignored): certain duplicate.
            dic1['duplicate_status'] = 'situation1'
            dic2['duplicate_status'] = 'situation1'
            id_list.append(_pick_duplicate_id(dic1, dic2, first_id, second_id))
        elif (urlparse(dic1['target']).netloc != urlparse(dic2['target']).netloc
              and dic1['title'] != dic2['title']):
            # Different host and different title but near-identical body.
            dic1['duplicate_status'] = 'situation2or5'
            dic2['duplicate_status'] = 'situation2or5'
            id_list.append(_pick_duplicate_id(dic1, dic2, first_id, second_id))
        else:
            # Same host or title-variant pair: keep both copies.
            dic1['duplicate_status'] = 'situation3or4'
            dic2['duplicate_status'] = 'situation3or4'
        case_list.append(dic1)
        case_list.append(dic2)

    deduplicate_end = time.time()
    print("hash去重花费时间为：", deduplicate_end - processing_data_end)

    # Map positional ids back to elasticsearch ids.
    duplicate_esid_list = []
    for duplicated_data_id in set(id_list):
        duplicate_esid_list = [i['es_id'] for i in data
                               if int(i['id_du']) == int(duplicated_data_id)] + duplicate_esid_list
    return duplicate_esid_list


def process_data(target_dictionary):
    """
    Drop exact-title duplicates, then simhash near-duplicates, in place.

    The list object passed in is mutated (the __main__ flow relies on
    that) and also returned for convenience.

    :param target_dictionary: raw result dicts
    :return: the same list object, with duplicates removed
    """
    # Pass 1: keep only the first occurrence of each processed title.
    # The original called list.remove() while iterating the same list,
    # which silently skips the element following every removal, and its
    # bare `except: pass` abandoned the whole pass on any error.
    seen_titles = set()
    kept = []
    for record in target_dictionary:
        try:
            title = process_title(record['title'])
        except Exception:
            # Malformed record: keep it rather than crash (best-effort,
            # matching the original's intent).
            kept.append(record)
            continue
        if title not in seen_titles:
            seen_titles.add(title)
            kept.append(record)
    # Slice assignment preserves in-place mutation semantics for callers.
    target_dictionary[:] = kept

    # Pass 2: remove the simhash-detected duplicates by their es `_id`.
    duplicate_ids = set(deduplicate(target_dictionary, feature_method='simple'))
    target_dictionary[:] = [record for record in target_dictionary
                            if record['_id'] not in duplicate_ids]
    return target_dictionary


def visualization(input_list, target_dictionary, feature_method, keyword, k_value=3):
    """
    Write a CSV showing, for each removed document, the documents it was
    judged a duplicate of and which rule (situation) fired.

    :param k_value: simhash near-duplicate distance
    :param keyword: the search term, repeated per row for readability
    :param input_list: ids of removed data, as produced by deduplicate()
    :param target_dictionary: original data from the API request
    :param feature_method: 'simple' or 'complex' hashing mode
    :return: None — output goes to "inspect<feature_method>.csv"
    :raises TypeError: for any other feature_method value
    """
    data = processing(target_dictionary)
    if feature_method == 'simple':
        sim_list = [(i['id_du'], Simhash(get_another_features(i['title'] + i['description']))) for i in data]
    elif feature_method == 'complex':
        sim_list = [(i['id_du'], Simhash(get_features(i['title'] + i['description']))) for i in data]
    else:
        raise TypeError('没有第三条路')
    index = Index(sim_list, k=k_value)

    # `with` closes the file; the original also called csvfile.close()
    # redundantly inside the block.
    with open("inspect" + feature_method + '.csv', 'w', encoding='utf-8', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(
            ['搜索词', '被去掉的数据id', '跟他重复的数据id', '标题', '摘要', 'url', 'releaseAt',
             '重复类别（1、2、5删，3、4保留）', '结果', '留存'])

        target_hash = ''
        for duplicate_id in input_list:
            # Find the simhash of the removed document.
            for sim_id, sim_hash in sim_list:
                if str(sim_id) == str(duplicate_id):
                    target_hash = sim_hash
            target_list = index.get_near_dups(target_hash)
            removed = target_dictionary[int(duplicate_id)]
            writer.writerow([keyword, duplicate_id, '', removed['title'],
                             removed['description'],
                             removed['target'],
                             removed['releaseAt']])
            for i in target_list:
                end = '保留' if i not in input_list else '删除'
                if int(i) != int(duplicate_id):
                    other = target_dictionary[int(i)]
                    if removed['title'] == other['title']:
                        status = "situation1"
                        result = "判定A列数据为重复数据，需要删除A列数据"
                    elif urlparse(removed['target']).netloc == urlparse(other['target']).netloc:
                        # BUGFIX: the original compared the peer's netloc
                        # with itself (always True), so situation2/5 was
                        # unreachable; compare removed row vs peer instead.
                        status = 'situation3/4'
                        result = "判定A列数据为非重复数据，展示可以保留A列数据"
                    else:
                        status = 'situation2/5'
                        result = "判定A列数据为重复数据，需要删除A列数据"

                    writer.writerow([keyword, duplicate_id, i, other['title'],
                                     other['description'],
                                     other['target'],
                                     other['releaseAt'], status, result, end])
            writer.writerow([])

def visualizations(target):
    """
    Run the full de-duplication pipeline and flag every surviving record.

    NOTE(review): the membership test below is always True, because the
    title list is built from exactly the records it then filters — so
    every survivor is flagged and returned.  Behavior kept as-is; confirm
    the intended filter with the author.

    :param target: raw result dicts (mutated in place by process_data)
    :return: surviving records, each with 'reserved' set to True
    """
    survivors = process_data(target)
    kept_titles = [record['title'] for record in survivors]
    flagged = []
    for record in survivors:
        if record['title'] in kept_titles:
            record['reserved'] = True
            flagged.append(record)
    return flagged



# Manual smoke test: runs the pipeline on three hard-coded records,
# two of which are near-duplicates of each other.
if __name__ == '__main__':
    import warnings

    # dateparser emits noisy warnings on ambiguous date strings; silence them.
    warnings.filterwarnings('ignore')
    kw = '挖掘机'
    # Live-API variant (disabled): fetch real results for the keyword.
    # dictionary = collect_data(kw)['data']['content']
    # print(dictionary[0:4])
    dictionary = [{'_id': 'q-BDXH4B9RWwI_ZSlJUN',
                   'description': '2022年2月23日，由芯智库主办，天风研究所、芯片超人协办，新华社长三角区域运营总部支持举办的“首届半导体产业峰会暨芯智库成立大会”（以下简称“大会”）在上海浦东隆重开幕。本届大会以“硬核芯时代”为',
                   'title': '以“芯智库”为起点！芯片超人开始连接芯片行业的一切-中国 ...',
                   'releaseAt': '2012-05-13T00:00:00',
                   'target': 'https://news.bjx.com.cn/html/20221010/1259527.shtml', 'thumbnail': None,
                   'mimeType': 'text/html',
                   'spec': False, 'viewType': 'NORMAL', 'resolution': None,
                   'source': 'http://zixun.trustexporter.com/list/40286.htm', 'industry': None},
                  {'_id': 'q-BDXH4B9RWwI_ZSlJaN',
                   'description': '2022年2月23日,由芯智库主办,天风研究所、芯片超人协办,新华社长三角区域运营 总 部支持举办的“首届半导体产业峰会暨芯智库成立大会”(以下简称“大会”)在上海浦东隆重开幕。 本届大会以“硬核芯时代”为主题,聚',
                   'title': '以“芯智库”为起点！芯片超人a始连接芯片行业的一切_中华网',
                   'releaseAt': '2012-05-13T00:00:00',
                   'target': 'https://www.evpartner.com/news/67/detail-63536.html', 'thumbnail': None,
                   'mimeType': 'text/html',
                   'spec': False, 'viewType': 'NORMAL', 'resolution': None,
                   'source': 'http://zixun.trustexporter.com/list/40286.htm', 'industry': None},
                     {'_id': 'q-BDXH4B9RWwI_ZSlJaN',
                   'description': '2022年2月23日,由芯智库主办,天风研究所、芯。 本届大会以“硬核芯时代”为主题,聚',
                   'title': '以“芯智库”为起点！芯片切_中华网',
                   'releaseAt': '2012-05-13T00:00:00',
                   'target': 'https://www.evpartner.com/news/67/detail-63536.html', 'thumbnail': None,
                   'mimeType': 'text/html',
                   'spec': False, 'viewType': 'NORMAL', 'resolution': None,
                   'source': 'http://zixun.trustexporter.com/list/40286.htm', 'industry': None}

                  ]
    old = time.time()
    # NOTE(review): process_data mutates `dictionary` in place, so the
    # visualizations() call below operates on the already-deduplicated list.
    for i in process_data(dictionary):
        print(i)
    print(visualizations(dictionary))
    new = time.time()
    # Wall-clock time for the whole pipeline run.
    print("总时间花费为：", new - old)
