import json
import time
import re
from mongoengine import *
# Import the API helper class defined in pretreat_tools/api_tools.py
from pretreat_tools.api_tools import ApiTools
# Import the NewsSpider entity class (mongoengine document model)
from SpiderAction.News import NewsSpider

from pretreat_tools.word_treat import sensitive_word

# API secret key
# NOTE(review): secret key is hard-coded in source — consider moving to config/env
secret_key = "b29fc8faceb28e90d54446c3a7ca5af6"
# Backend endpoint that receives one news record per POST
url = "http://tanqin520.udutech.com:8400/news/insertRecord"
# Connect to the local MongoDB "record" database (module-level side effect:
# the connection is opened as soon as this module is imported)
connect('record', host='127.0.0.1', port=27017)
# Directory containing the sensitive-word dictionaries
# NOTE(review): hard-coded absolute Windows path — breaks on other machines
sensitive_word_dir = "D:\\PythonWork\\SpiderTest\\RequestFiles\\敏感词"


def _mark_sensitive(strings, sensitive_list):
    """
    Wrap each sensitive word found inside the HTML text segments (">...<")
    of *strings* with ``<span class="marker">...</span>`` so the frontend
    can highlight it.

    Bug fixed versus the original implementation: each replacement used to be
    rebuilt from the unmodified source string, so only the LAST matched word
    survived in the output. Here all match positions are collected against the
    original string first, then the markers are inserted right-to-left so
    earlier offsets remain valid.

    :param strings: HTML article body
    :param sensitive_list: iterable of sensitive words to search for
    :return: (marked_text, words_found) where words_found lists every
             sensitive word that was marked (may contain duplicates when a
             word appears in several distinct segments)
    """
    # Text segments between an HTML closing '>' and the next opening '<'.
    re_flag1 = re.compile(r'>[\s\S]*?[\s\S]*?<')
    re_list = re_flag1.findall(strings)
    word_in_sen = []
    positions = set()  # deduplicated (absolute_start, word) pairs
    for word in sensitive_list:
        for seg in re_list:
            if word and word in seg:
                # Like the original: first occurrence of the segment in the
                # article, first occurrence of the word in the segment.
                pos = strings.index(seg) + seg.index(word)
                if (pos, word) not in positions:
                    positions.add((pos, word))
                    word_in_sen.append(word)
    details = strings
    # Insert markers from the rightmost match to the leftmost so that the
    # absolute offsets computed above stay valid after each insertion.
    for pos, word in sorted(positions, reverse=True):
        details = (details[:pos]
                   + '<span class="marker">' + word + '</span>'
                   + details[pos + len(word):])
    return details, word_in_sen


def send_data(qurey_Set):
    """
    Process one news record (mark sensitive words in the article body),
    send it as JSON to the backend API, and persist the result flags.

    :param qurey_Set: one news document from the database (records are
                      uploaded to the backend one at a time)
    :return: dict with the parsed server response ("r") and the list of
             sensitive words that were marked ("word_in_sen")
    """
    data_send_dict = {"navId": qurey_Set.status,
                      "title": qurey_Set.title,
                      "tags": "#",
                      "mainFigure": "#",
                      # [1:-1] strips the surrounding quote/bracket characters
                      # stored with the article — TODO confirm against the model
                      "details": qurey_Set.article[1:-1],
                      "source": qurey_Set.sourceName,
                      }
    # Load the sensitive-word dictionary from disk.
    sensitive_list = sensitive_word(sensitive_word_dir)
    # Mark every sensitive word in the article body for frontend display.
    details, word_in_sen = _mark_sensitive(data_send_dict["details"],
                                           sensitive_list)
    data_send_dict["details"] = details
    # navId 1 = sensitive words found, 2 = clean article.
    nav_id = 1 if word_in_sen else 2
    data_send_dict["navId"] = nav_id
    # Sign and send the record through the API helper.
    r = ApiTools(url, secret_key, data_send_dict).sign().send()
    # Write the computed nav_id back to the database.
    qurey_Set.update(status=str(nav_id), upsert=True)
    print("服务器相应结果：", r)
    r = json.loads(r)
    # examine flag: "pass" = filtered and uploaded OK, "fail" = upload failed.
    qurey_Set.examine = "pass" if r["code"] == 0 else "fail"
    qurey_Set.save()
    return {"r": r, "word_in_sen": word_in_sen}


def screening_flag():
    """
    Upload every record whose examine flag is 'suspend' (not yet filtered);
    records marked 'pass' have already been filtered and uploaded.
    Prints a summary of the sensitive words hit, the record count, and the
    elapsed time for the batch.
    """
    uploaded = 0          # records processed in this run
    collected_words = []  # every sensitive word reported by send_data
    started = time.time()
    # 'suspend' = pending; 'pass' = already filtered and uploaded.
    pending = NewsSpider.objects.filter(examine='suspend')
    for news in pending:
        print("正在上传数据信息")
        outcome = send_data(news)
        uploaded += 1
        collected_words.extend(outcome["word_in_sen"])
    # Deduplicate, then drop empty entries before reporting.
    distinct_words = list(filter(None, set(collected_words)))
    if distinct_words:
        print("该批次总共过滤到的敏感词为：", distinct_words)
    else:
        print("过滤没有发现敏感词")
    print("一共处理了多少条资讯：", uploaded)
    elapsed = time.time() - started
    print("处理总共花了多长时间（秒级）：", elapsed)