#!/usr/bin/python3.8
# -*- coding:UTF-8 -*-

import json
import random
import re
from datetime import datetime, timedelta
import logging
import os
import sys
import threading
import time
from hashlib import md5
from queue import Queue

BASH_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASH_DIR)
from lxml import etree, html
from pymongo import UpdateOne, ReturnDocument

from base_spider import BaseSpider
from conf.conf_util import title_strip, upload_image_delete_pic2, send_request, \
    sha256_all_text, clear_html, parse_xpath, judge_xpath
from conf.database import DATABASE
from conf.dber import MongoDBer
from conf.logging_debug import MyLogging


# 批量更新
class Luoyangggzy(BaseSpider):
    def __init__(self, db_name):
        """
        Crawler for the Luoyang public-resource trading portal (111.7.67.183).

        Sets up HTTP headers, work queues, MongoDB handles, logging, indexes
        and the local IP used to tag crawled rows.

        :param db_name: key into DATABASE selecting the MongoDB config that
                        holds the parameter / error collections.
        """
        super(Luoyangggzy, self).__init__()
        self.headers = {
            # "Cookie": "JSESSIONID=325F32A63F535B0ECC371B5881FAAE44; jfe_pin=edb5223c; jfe_ts=1667356652.668; jfe_sn=cj4oWRHJrJdcLhbDnSWPd5qP5hU=",
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/103.0.0.0 Safari/537.36",
        }
        self.params_queue = Queue(maxsize=10)  # category/filter tasks awaiting crawl
        self.detail_queue = Queue(maxsize=100)  # detail-page data awaiting processing
        self.index_status = True  # True until create_indexes() has run once
        self.screen_queue = Queue(maxsize=100)  # URL queue, used for screenshots
        self.local = threading.local()  # holds the per-thread bulk-insert buffer
        self.db_m = MongoDBer(DATABASE[db_name])  # MongoDB connection (params / errors)
        self.db_m1 = MongoDBer(DATABASE['guxulong'])  # MongoDB connection (detail data)
        self.luoyangggzy_param = self.db_m["luoyangggzy_param"]
        self.luoyangggzy_param_err = self.db_m["luoyangggzy_param_err"]
        self.luoyangggzy_data = self.db_m1["a_luoyangggzy_detail"]
        self.xpaths = ['//div[@class="article-info"]', ]  # detail-page body xpaths
        # logger that both prints and writes at INFO level
        self.m = MyLogging(self.__class__.__name__, debug_level=logging.INFO, write_level=logging.INFO).logging
        self.create_indexes()  # create collection indexes
        self.myself_ip = self.get_myself_ip()  # this machine's IP

    def get_params(self):
        """
        Seed the parameter collection with one document per list-page category.

        Each document describes one search-API query: the POST endpoint
        (``link``), the ``categorynum`` filter (``equal_``), the optional
        district-code filter (``equal_1``), and the labels stamped onto the
        crawled rows (``industry``, ``ifbprogress``, ``ifbprogresstag``).
        Idempotent: an atomic upsert keyed on all six descriptive fields
        replaces the original find_one + insert_one pair, so re-runs (and
        concurrent runs) never create duplicates; ``page``/``status`` are only
        written when a document is first created.
        """
        api = "http://111.7.67.183/inteligentsearch/rest/esinteligentsearch/getFullTextDataNew"
        # District codes (xiaqucode) shared by every per-district category.
        district_codes = [
            "410301",  # 市本级
            "410381",  # 偃师区
            "410322",  # 孟津区
            "410305",  # 涧西区
            "410327",  # 宜阳县
            "410329",  # 伊川县
            "410323",  # 新安县
            "410326",  # 汝阳县
            "410328",  # 洛宁县
            "410325",  # 嵩县
            "410324",  # 栾川县
        ]
        # Rows of (industry, categorynum, ifbprogress, ifbprogresstag, district code).
        rows = [
            ("建设工程", "003001001", "招标公告", "招标计划", ""),
        ]
        rows += [("建设工程", "003001002", "招标公告", "招标公告", code) for code in district_codes]
        rows += [
            ("建设工程", "003001003", "招标公告", "变更公告", ""),
            ("建设工程", "003001004", "中标公告", "中标候选人公示", ""),
        ]
        rows += [("建设工程", "003001005", "中标公告", "中标公示", code) for code in district_codes]
        rows += [
            ("建设工程", "003001006", "中标公告", "合同信息", ""),
            ("建设工程", "003001006", "中标公告", "合同履行", ""),
            ("政府采购", "003002001", "招标公告", "采购意向", ""),
        ]
        rows += [("政府采购", "003002002", "招标公告", "采购公告", code) for code in district_codes]
        rows += [
            ("政府采购", "003002003", "招标公告", "变更公告", ""),
        ]
        rows += [("政府采购", "003002004", "中标公告", "结果公告", code) for code in district_codes]
        rows += [
            ("政府采购", "003002005", "招标公告", "废标公告", ""),
            ("政府采购", "003002006", "中标公告", "合同公告", ""),
            ("产权交易", "003003001", "招标公告", "预公告", ""),
            ("产权交易", "003003002", "招标公告", "交易公告", ""),
            ("产权交易", "003003004", "中标公告", "结果公告", ""),
            ("土地交易", "003004001", "招标公告", "预公告", ""),
            ("土地交易", "003004002", "招标公告", "交易公告", ""),
            ("土地交易", "003004003", "招标公告", "变更公告", ""),
            ("土地交易", "003004004", "中标公告", "成交公示", ""),
            ("矿业权交易", "003005001", "招标公告", "采矿权公示公告", ""),
            ("矿业权交易", "003005002", "招标公告", "探矿权公示公告", ""),
            ("矿业权交易", "003005003", "招标公告", "其他事项/补充公告", ""),
        ]
        for industry, categorynum, ifbprogress, ifbprogresstag, district_code in rows:
            key = {
                "industry": industry,
                "ifbprogresstag": ifbprogresstag,
                "ifbprogress": ifbprogress,
                "link": api,
                "equal_": categorynum,
                "equal_1": district_code,
            }
            self.luoyangggzy_param.update_one(
                key, {"$setOnInsert": {"page": 0, "status": 0}}, upsert=True)

    def get_all_number_page(self, param_res, param_result):
        """
        Compute the total number of result pages (10 records per page).

        :param param_res: raw JSON response text from the search API.
        :param param_result: category parameter document (unused; kept so the
                             call-site signature stays unchanged).
        :return: page count >= 1; falls back to 1 when the response cannot be
                 parsed or lacks the expected structure.
        """
        try:
            total = json.loads(param_res)["result"]["totalcount"]
            # Ceiling division. The previous int(total / 10 + 1) asked for one
            # extra, always-empty page whenever totalcount was an exact
            # multiple of 10.
            return max(1, (int(total) + 9) // 10)
        except (ValueError, KeyError, TypeError):
            # Malformed / unexpected response (json.JSONDecodeError is a
            # ValueError): treat as a single page, matching the old fallback.
            return 1

    def parse_data(self, param_res, param_result):
        """
        Parse one page of the search API's JSON list response.

        :param param_res: raw JSON text of the list page.
        :param param_result: the category's parameter document; when its
                             'day_flag' key is truthy the crawl is incremental
                             and stops at records older than 10 days.
        :return: (titles, detail URLs, publish dates, day_end_flag, counties);
                 day_end_flag is True when the 10-day cutoff was hit.
        """
        day_end_flag = False  # incremental-mode cutoff marker
        title_list = []
        originalurl_list = []
        publishdate_list = []
        county_list = []
        records = json.loads(param_res)['result']["records"]
        for record in records:
            # Detail-page URL (site-relative in the response body).
            link = record.get('linkurl', '')
            originalurl_list.append(f'http://111.7.67.183{link}' if link else '')
            title = record.get('title', "").strip()
            print(title)
            title_list.append(title)
            # District name; city-level pseudo-districts are blanked out.
            county = record.get('xiaquname', "").strip()
            if ('市辖区' in county) or ('市本级' in county):
                county = ''
            print(county)
            county_list.append(county)
            # Publish date: normalise Chinese date markers (年/月/日) and fall
            # back to a 1970 sentinel when absent.
            publishdate_raw = record.get('webdate', "")
            if publishdate_raw:
                publishdate_str = publishdate_raw.strip().replace('年', '-').replace('月', '-').replace('日', '')
            else:
                publishdate_str = '1970-01-01 00:00:00'
            publishdate = datetime.strptime(publishdate_str, "%Y-%m-%d %H:%M:%S")
            # Incremental mode: stop at the first record older than 10 days.
            if param_result.get('day_flag'):
                if publishdate < datetime.now() - timedelta(days=10):
                    day_end_flag = True
                    break
            # Bug fix: the sentinel was previously compared against
            # '1970-01-01' (no time part) and therefore never matched; rows
            # with no webdate now correctly store '' instead of the 1970 date.
            if publishdate_str == '1970-01-01 00:00:00':
                publishdate = ''
            publishdate_list.append(publishdate)
        return title_list, originalurl_list, publishdate_list, day_end_flag, county_list

    def remove_js_style(self, response):
        """
        Strip script/noscript and style/nostyle elements from an HTML document.

        :param response: HTML source text.
        :return: the re-serialised UTF-8 markup, or False on any error.
        """
        encoding = 'utf-8'
        try:
            tree = etree.HTML(response)
            # One combined pass over every script/style-like node; removals
            # are independent, so a single XPath union is equivalent to the
            # two separate sweeps.
            for node in tree.xpath("//script | //noscript | //style | //nostyle"):
                node.getparent().remove(node)
            return html.tostring(tree, encoding=encoding).decode(encoding)
        except Exception as err:
            print('remove_js_style报错是%s ' % err)
            return False

    def create_indexes(self):
        if self.index_status:
            temp_url_index = True
            temp_status_index = True
            industry_status_index = True
            update_time_index = True
            html_id_index = True
            html_index = True
            for index in self.luoyangggzy_data.list_indexes():
                if "originalurl" in index["name"]:
                    temp_url_index = False
                    continue
                if "status" in index["name"]:
                    temp_status_index = False
                    continue
                if "industry" in index["name"]:
                    industry_status_index = False
                if "update_time" in index["name"]:
                    update_time_index = False
                if "html" in index["name"]:
                    html_index = False
            if temp_url_index:
                self.luoyangggzy_data.create_index([("originalurl", 1), ("html_id", 1)], unique=True, background=True)
            if temp_status_index:
                self.luoyangggzy_data.create_index([("status", 1)], background=True)
            if industry_status_index:
                self.luoyangggzy_data.create_index([("SnapShot", 1), ("ifbprogress", 1)],
                                                   background=True)
                self.luoyangggzy_data.create_index(
                    [("industry", 1), ("SnapShot", 1), ("ifbprogress", 1), ("image_status", 1)], background=True)
            if update_time_index:
                self.luoyangggzy_data.create_index([("update_time", 1)], background=True)
            if html_index:
                self.luoyangggzy_data.create_index([("html", 1)], background=True)
            self.index_status = False

    def md5_url(self, url):
        """Return the hex MD5 digest of *url* (UTF-8 encoded)."""
        return md5(url.encode()).hexdigest()

    def save_data(self, title_list, originalurl_list, publishdate_list, page, param_result, county_list):
        """
        Queue one detail-row upsert (via insert_data) per list-page entry.

        The four input lists are walked in lockstep; constant fields (province,
        city, source platform, executor tag, website id) are hard-wired for
        this portal.
        """
        rows = zip(title_list, originalurl_list, publishdate_list, county_list)
        for title, originalurl, publishdate, county in rows:
            self.insert_data(
                param_result['industry'], "", param_result['ifbprogress'],
                param_result['ifbprogresstag'], '', '', '河南省', '洛阳市',
                county, title, '', '',
                publishdate, str(publishdate)[:4], '',
                '全国公共资源交易平台（河南省·洛阳市）', originalurl, '',
                datetime.now(), "", self.myself_ip, "Jjd", "", page,
                weather_have_iframe=0, weather_have_image=0,
                weather_have_pdf=0, weather_have_pdf_type2=0, url_type='html',
                original_website_id=75, weather_have_blank_url=0,
                weather_have_enclosure=0, uuid="", image_status=0)

    def insert_data(self, industry, industryv2, ifbprogress, ifbprogresstag, zhaocai_type, channelname, province, city,
                    county, title,
                    ifbunit, agent,
                    publishdate, publishyear, projectno, sourceplatform, originalurl, tenderaddress,
                    ctime, SnapShot, ip, executor, text_xpath, page, weather_have_iframe,
                    weather_have_image, weather_have_pdf, weather_have_pdf_type2, url_type,
                    original_website_id, weather_have_blank_url, weather_have_enclosure,
                    uuid, image_status):
        """
        Buffer one detail-row upsert (keyed on originalurl) in the thread-local
        list and bulk-write the batch once it reaches 100 operations.

        Fields written later by the detail-page stage (not set here):
        "html": res, "TwoLvTitle": TwoLvTitle, "update_time": now_time,
        "text_xpath": xpath, "xpath_err": xpath_err, "status": 2,
        "result_list": list-page parameters.

        NOTE(review): the text_xpath and image_status parameters are accepted
        but never stored in the $set document, and SnapShot is written
        verbatim — confirm whether that is intentional.
        """
        md5_url = self.md5_url(originalurl)  # local name shadows the method
        self.local.insert_data_list.append(UpdateOne(
            {"originalurl": originalurl}, {
                "$set": {
                    "industry": industry, "industryv2": industryv2, "ifbprogress": ifbprogress,
                    "ifbprogresstag": ifbprogresstag, "channelname": channelname,
                    "province": province, "city": city, "county": county, "title": title_strip(title),
                    "publishdate": publishdate, "publishyear": publishyear,
                    "projectno": projectno, "sourceplatform": sourceplatform,
                    "originalurl": originalurl, "md5_originalurl": md5_url, "tenderaddress": tenderaddress,
                    "ctime": ctime, "SnapShot": SnapShot, "ip": ip, "executor": executor,
                    "utime": ctime, "version_num": 1, "agent": agent, "zhaocai_type": zhaocai_type,
                    "is_parse_html": 0, "ifbunit": ifbunit, "page": page,
                    "weather_have_iframe": weather_have_iframe, "weather_have_image": weather_have_image,
                    "weather_have_pdf": weather_have_pdf, "weather_have_pdf_type2": weather_have_pdf_type2,
                    "url_type": url_type, "original_website_id": original_website_id,
                    "weather_have_enclosure": weather_have_enclosure,
                    "weather_have_blank_url": weather_have_blank_url, "uuid": uuid,
                }
            }, upsert=True
        ))

        # Flush in batches of 100 to limit round-trips; the buffer is
        # thread-local so concurrent worker threads never interleave batches.
        if len(self.local.insert_data_list) >= 100:
            try:
                self.luoyangggzy_data.bulk_write(self.local.insert_data_list)
            except Exception as e:
                self.m.error("插入失败%s条数据" % len(self.local.insert_data_list), e)
            else:
                self.m.info("插入成功%s条数据" % len(self.local.insert_data_list))
            finally:
                self.local.insert_data_list.clear()

    def save_param_err(self, param_url, parma_result):
        """
        Record a failed list-page request, at most once per unique
        category/url combination.
        """
        # The lookup key and the stored document are identical, so one dict
        # serves both purposes.
        err_doc = {
            "ifbprogress": parma_result['ifbprogress'],
            "ifbprogresstag": parma_result['ifbprogresstag'],
            "industry": parma_result['industry'],
            "channelname": parma_result['channelname'],
            "status": 0,
            "link": param_url,
        }
        if self.luoyangggzy_param_err.find_one(err_doc) is None:
            self.luoyangggzy_param_err.insert_one(err_doc)

    def parse_xunhuan(self, param_result):
        """
        Page through one category of the list API, saving rows and
        checkpointing progress back into luoyangggzy_param.

        A full pass crawls every page; once finished, 'day_flag' is set and
        the category switches to incremental mode, which restarts from page 0
        and stops at records older than 10 days.

        :param param_result: one parameter document from luoyangggzy_param.
        """
        # Pick the starting page. Incremental mode (day_flag set) always
        # starts at 0; otherwise resume from the stored page, restarting the
        # full pass when the previous one began more than 2 days ago.
        if not param_result.get('day_flag'):
            if (datetime.now() - param_result.get('all_start_time', datetime.now() - timedelta(days=2))).days > 2:
                self.luoyangggzy_param.update_one({'_id': param_result['_id']},
                                                  {'$set': {'all_start_time': datetime.now()}})
                page = 0
            else:
                page = param_result.get('page', 1)
        else:
            page = 0
        while True:
            if page == 0:
                param_url = param_result['link']
            else:
                # NOTE(review): 'link' is the search-API URL and does not
                # appear to contain 'moreinfo', so this replace looks like a
                # no-op; paging is actually driven by the "pn" offset in the
                # POST body below — confirm.
                param_url = param_result['link'].replace('moreinfo', str(page))
            print(param_url)
            equal = param_result['equal_']  # categorynum filter
            equal_ = param_result['equal_1']  # district (xiaqucode) filter, may be ''
            # POST body for the full-text search endpoint; pn is the record
            # offset (10 records per page).
            data = {
                "token": "",
                "pn": page * 10,
                "rn": 10,
                "sdt": "",
                "edt": "",
                "wd": "",
                "inc_wd": "",
                "exc_wd": "",
                "fields": "",
                "cnum": "001",
                "sort": "{\"webdate\":\"0\",\"id\":\"0\"}",
                "ssort": "",
                "cl": 200,
                "terminal": "",
                "condition": [
                    {
                        "fieldName": "categorynum",
                        "equal": f"{equal}",
                        "notEqual": None,
                        "equalList": None,
                        "notEqualList": None,
                        "isLike": True,
                        "likeType": 2
                    },
                    {
                        "fieldName": "xiaqucode",
                        "equal": f"{equal_}",
                        "notEqual": None,
                        "equalList": None,
                        "notEqualList": None,
                        "isLike": True,
                        "likeType": 0
                    }
                ],
                "time": None,
                "highlights": "",
                "statistics": None,
                "unionCondition": None,
                "accuracy": "",
                "noParticiple": "1",
                "searchRange": None,
                "noWd": True
            }
            headers = {
                "Accept": "application/json, text/javascript, */*; q=0.01",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "Connection": "keep-alive",
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                "Origin": "http://111.7.67.183",
                "Referer": "http://111.7.67.183/jyxx/transaction.html",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
                "X-Requested-With": "XMLHttpRequest"
            }
            # The endpoint expects a compact JSON string body.
            data = json.dumps(data, separators=(',', ':'))
            try:
                param_res = self.send_rquest_post(url=param_url, data=data, headers=headers)
            except Exception as err:
                time.sleep(random.randint(10, 15))
                # Retry the same page after a pause; the failing link could
                # optionally be recorded in the error table:
                # self.save_param_err(param_url, param_result)
                continue
            all_number_page = self.get_all_number_page(param_res, param_result)
            # all_number_page = 5
            print('总页数是%s' % all_number_page)
            # Past the last page: mark the category finished and switch it to
            # incremental (day_flag) mode.
            if page > int(all_number_page) - 1:
                self.luoyangggzy_param.update_one({'_id': param_result['_id']},
                                                  {'$set': {'page': page, 'day_flag': True,
                                                            'all_start_time': datetime.now()}})
                break
            if param_res:
                title_list, originalurl_list, publishdate_list, day_end_flag, county_list = self.parse_data(
                    param_res, param_result)

                self.save_data(title_list, originalurl_list, publishdate_list, page, param_result, county_list)
                if day_end_flag:
                    # Incremental 10-day cutoff reached: reset the checkpoint
                    # and record the time of this pass.
                    self.luoyangggzy_param.update_one({'_id': param_result['_id']},
                                                      {'$set': {'day_flag': True, 'page': 1,
                                                                'all_start_time': datetime.now()}})
                    self.m.info(
                        '%s 的%s的第%s页增量成功' % (param_result['industry'], param_result['ifbprogresstag'], page))
                    break
                self.m.info(
                    '%s 的%s的第%s页获取数据成功' % (param_result['industry'], param_result['ifbprogresstag'], page))
            else:
                print('没有数据')
            # Checkpoint the page number reached so far.
            self.luoyangggzy_param.update_one({'_id': param_result['_id']}, {'$set': {'page': page, 'status': 1}})
            page = page + 1
            time.sleep(2)
        # Category finished: persist its final status.
        self.luoyangggzy_param.update_one({'_id': param_result['_id']}, {'$set': {'status': 2, }})
        self.m.info('%s 的%s获取数据完毕' % (param_result['industry'], param_result['ifbprogresstag']))

    def get_menu_producer_consumer(self):
        """
        Consumer loop: drain category tasks from params_queue and crawl each.

        A falsy queue item is the poison pill — flush the thread-local
        bulk-write buffer and exit.
        """
        self.local.insert_data_list = []  # per-thread bulk-insert buffer
        while True:
            task = self.params_queue.get()
            if task:
                self.parse_xunhuan(task)
                continue
            # Poison pill: flush whatever is still buffered, then stop.
            try:
                self.luoyangggzy_data.bulk_write(self.local.insert_data_list)
            except Exception as err:
                self.m.error("get_data结束后写入缓存数据失败,原因是:%s" % err)
            break

    def judge_xpath_err(self, detail_result):
        if (detail_result['industry'] == "工程建设" or detail_result['industry'] == "建设工程" or detail_result[
            'industry'] == "政府采购") and detail_result['ifbprogress'] == "中标公告":
            try:
                SnapShot = detail_result["SnapShot"]
            except:
                SnapShot = ""
            if SnapShot:
                image_status = 2
                xpath_err = 0
            else:
                image_status = 0
                xpath_err = 0
        else:
            image_status = 0
            xpath_err = 0
        return image_status, xpath_err

    def get_all_text(self, res, xpath):
        """
        Hash the Chinese text content of the page's main body.

        Joins the node values matched by *xpath*, keeps only CJK characters
        (U+4E00..U+9FA5), and returns sha256_all_text(...) of the result when
        more than 50 such characters remain; otherwise returns "".

        Fix: the triple-quoted summary originally sat *after* a statement and
        therefore was never a real docstring.
        """
        re_rule = re.compile("[\u4e00-\u9fa5]")
        tree = etree.HTML(res)
        res_text = "".join(tree.xpath(xpath))
        all_text = "".join(re_rule.findall(res_text))
        if all_text and len(all_text) > 50:
            return sha256_all_text(all_text)
        return ""

    def get_all_text1(self, res, xpath):
        """
        Hash the alphanumeric-plus-Chinese text content of the page body.

        Like get_all_text, but also keeps ASCII letters and digits before
        hashing with sha256_all_text; returns "" when 50 or fewer characters
        remain.

        Fix: the triple-quoted summary originally sat *after* a statement and
        therefore was never a real docstring.
        """
        re_rule = re.compile("[\u4e00-\u9fa5a-zA-Z0-9]")
        tree = etree.HTML(res)
        res_text = "".join(tree.xpath(xpath))
        all_text = "".join(re_rule.findall(res_text))
        if all_text and len(all_text) > 50:
            return sha256_all_text(all_text)
        return ""

    def judge_xpath(self, res, xpaths):
        """Return the first xpath in *xpaths* that matches *res*, else ""."""
        return next((xp for xp in xpaths if parse_xpath(res, xp)), "")

    def get_file_json(self, text_xpath, html_):
        """
        Collect downloadable attachments found under the body xpath.

        Gathers urls from ``<a href>`` and ``<button code>`` nodes plus
        their link texts, keeps pairs where either the url or the name
        carries a known document extension, and returns
        ``{"files": [{"file_name", "file_url", "file_type"}, ...]}``
        or "" when nothing qualifies.

        (Fix: the original tested ".zip" twice in its or-chain; the
        extension set is now a single tuple checked with any().)

        :param text_xpath: xpath of the article body container.
        :param html_: raw HTML string of the detail page.
        """
        ext_rule = re.compile(r"\.([^.]*)$")  # text after the final dot
        doc_exts = (".pdf", ".zip", ".doc", ".rar", ".xlsx")
        tree = etree.HTML(html_)
        file_urls = (tree.xpath(text_xpath + "//a/@href")
                     + tree.xpath(text_xpath + "//button/@code"))
        file_names = (tree.xpath(text_xpath + "//a//text()")
                      + tree.xpath(text_xpath + "//button//text()"))
        files = []
        for file_url, file_name in zip(file_urls, file_names):
            if not any(ext in file_url or ext in file_name for ext in doc_exts):
                continue
            # NOTE(review): host jczx.wjbfwzx.com looks copied from another
            # spider — confirm it is the right prefix for luoyangggzy.
            full_url = 'http://jczx.wjbfwzx.com:80' + file_url
            file_type = "".join(ext_rule.findall(full_url))
            if file_name and file_type:
                files.append({"file_name": file_name,
                              "file_url": full_url,
                              "file_type": file_type})
        return {"files": files} if files else ""

    def get_detail_consumer(self):
        """
        Detail-page consumer: pull records off ``detail_queue``, fetch
        each original page, and buffer UpdateOne operations that set the
        html body, body hash, secondary title and update time.
        Duplicates and failures are marked directly on the row instead.
        Stops (flushing the buffer) when a ``None`` sentinel is dequeued.
        """
        self.local.get_detail_consumer_list = []  # thread-local UpdateOne batch
        while True:
            detail_result = self.detail_queue.get()
            time.sleep(2)  # throttle requests against the target site
            if not detail_result:
                # Sentinel: flush whatever is left in the batch.
                # NOTE(review): bulk_write([]) raises InvalidOperation, so an
                # empty leftover batch is logged as "更新失败" — confirm intended.
                try:
                    self.luoyangggzy_data.bulk_write(self.local.get_detail_consumer_list)
                    self.m.info("get_detail_consumer结束")
                except Exception as e:
                    self.m.error("更新失败%s" % e)
                else:
                    self.m.debug("更新成功%s条" % len(self.local.get_detail_consumer_list))
                break
            else:
                print('开始更新    ' + detail_result['originalurl'])
                url = detail_result['originalurl']
                try:
                    # On any request failure just skip the record
                    # (it stays claimed with status 1).
                    detail_res_all = self.send_rquest_get(url=url, headers=self.headers)
                except:
                    continue
                if detail_res_all:
                    # Pick the first configured body xpath that matches.
                    try:
                        xpath = judge_xpath(detail_res_all, self.xpaths)
                    except:
                        xpath = ''
                    if xpath != '':
                        image_status, xpath_err = self.judge_xpath_err(detail_result)
                    else:
                        continue
                        # raise Exception('xpath_err url是%s' % detail_result['originalurl'])
                    TwoLvTitle = etree.HTML(detail_res_all).xpath("//h3/text()")
                    # NOTE(review): when <h3> texts EXIST they are discarded in
                    # favour of the list title, and when none exist TwoLvTitle
                    # stays an empty list — this condition looks inverted; confirm.
                    if TwoLvTitle != []:
                        TwoLvTitle = detail_result['title']
                    # Hash of the body's Chinese text; "" when too short.
                    html_id = self.get_all_text(detail_res_all, xpath + "//text()")
                    print('html_id是' + html_id)
                    if html_id:
                        try:
                            # html_id is known truthy here; this only flags
                            # records with an empty title.
                            if detail_result['title'] == '' or html_id == '':
                                xpath_err = 1
                            else:
                                xpath_err = 0
                        except:
                            xpath_err = 1
                        # Skip pages whose (url, body hash) pair is already stored.
                        detail_res = self.luoyangggzy_data.find_one(
                            {"originalurl": detail_result['originalurl'], "html_id": html_id})
                        if not detail_res:
                            file_json = self.get_file_json(xpath, detail_res_all)
                            self.local.get_detail_consumer_list.append(UpdateOne(
                                {"_id": detail_result["_id"]},
                                {"$set": {
                                    "originalurl": detail_result['originalurl'],
                                    "html": clear_html(detail_res_all),
                                    "html_id": html_id,
                                    "image_status": image_status,
                                    "TwoLvTitle": title_strip(TwoLvTitle),
                                    "utime": datetime.now(),
                                    "text_xpath": xpath,
                                    "xpath_err": xpath_err,
                                    "status": 2,
                                    "originalurl_data_from": {
                                        "url": detail_result['originalurl'],
                                        "method": "get",
                                        "request_only_data": {},
                                        "response_only_data": {}
                                    },
                                    "file_json": file_json,
                                    "Bid_data_acquisition_format": "HTML",
                                }}
                            ))
                        else:
                            # Duplicate content: mark done, no screenshot needed.
                            self.luoyangggzy_data.update_one({"_id": detail_result["_id"]}, {"$set": {
                                "status": 2,
                                "image_status": 2,
                                "err_txt": "该数据为重复数据，无需上传截图",
                                "xpath_err": 1,
                            }})
                    else:
                        # No usable body text could be extracted.
                        self.luoyangggzy_data.update_one({"_id": detail_result["_id"]}, {"$set": {
                            "status": 2,
                            "image_status": 2,
                            "err_txt": "没有正文",
                            "xpath_err": 1,
                        }})
                else:
                    # Empty response body.
                    self.luoyangggzy_data.update_one({"_id": detail_result["_id"]}, {"$set": {
                        "status": 2,
                        "image_status": 2,
                        "html": "",
                        "err_txt": "正文部分为空",
                        "xpath_err": 1,
                    }})
                # Flush after every record (threshold is effectively 1).
                if len(self.local.get_detail_consumer_list) >= 1:
                    try:
                        self.luoyangggzy_data.bulk_write(self.local.get_detail_consumer_list)
                    except Exception as e:
                        # NOTE(review): extra positional arg `e` with no matching
                        # %-placeholder — logging will report a format error here.
                        self.m.error("更新失败%s" % len(self.local.get_detail_consumer_list), e)
                    else:
                        self.m.info("更新成功%s" % len(self.local.get_detail_consumer_list))
                    finally:
                        self.local.get_detail_consumer_list.clear()

    def screen_shot_consumer(self):
        """
        Screenshot consumer: upload a page capture for every record
        pulled off ``screen_queue`` until a ``None`` sentinel arrives.
        Records without a body xpath are skipped.
        """
        while True:
            result = self.screen_queue.get()
            if not result:
                print("upload_images结束")
                break
            # Skip rows that never had a body xpath assigned.
            if not result.get("text_xpath"):
                continue
            # NOTE(review): coll_name mentions "hebi" in a luoyang spider —
            # looks copy-pasted; confirm the target collection.
            upload_image_delete_pic2(url=result["originalurl"],
                                     coll_name="lzl_hebi_publish_data",
                                     id=result["_id"])

    def add_detail_to_queue(self, flag=False):
        """
        Detail producer: claim unprocessed rows (status 0 or missing)
        one at a time and feed them into ``detail_queue``; finally push
        one ``None`` sentinel per consumer thread.

        :param flag: when True, stop after claiming a single row (test mode).
        """
        # Recycle rows that were claimed (status 1) but never finished.
        self.luoyangggzy_data.update_many({'status': 1}, {'$set': {'status': 0}})
        claim_filter = {'$or': [{'status': 0}, {'status': {'$exists': False}}]}
        claim_update = {'$set': {'status': 1, 'status_time': datetime.now()}}
        err_count = 0
        while True:
            try:
                row = self.luoyangggzy_data.find_one_and_update(
                    claim_filter, claim_update, {},
                    return_document=ReturnDocument.AFTER)
            except Exception as err:
                self.m.error('%s:::获取标书详情从库中读取数据err info:%s' % (datetime.now(), err))
                err_count += 1
                time.sleep(3)
                if err_count >= 10:
                    break
                continue
            if not row:
                self.m.info("%s:::获取标书详情的生产者结束", datetime.now())
                break
            self.detail_queue.put(row)
            if flag:
                break
        for _ in range(self.thread_num):
            self.detail_queue.put(None)

    def add_menu_producer_queue(self, flag=False):
        """
        List producer: reset every param row to status 0, then claim
        them one by one (status 0 -> 1) into ``params_queue``. Finally
        push one ``None`` sentinel per consumer thread.

        :param flag: when True, stop after claiming a single row (test mode).
        """
        self.luoyangggzy_param.update_many({'status': {"$ne": 0}},
                                           {'$set': {'status': 0}})
        claim_filter = {'status': 0}
        claim_update = {'$set': {'status': 1}}
        err_count = 0
        while True:
            try:
                row = self.luoyangggzy_param.find_one_and_update(
                    claim_filter, claim_update, {},
                    return_document=ReturnDocument.AFTER)
            except Exception as err:
                self.m.error('%s:::获取标书目录从库中读取数据err info:%s' % (datetime.now(), err))
                err_count += 1
                time.sleep(1)
                if err_count >= 10:
                    break
                continue
            if not row:
                self.m.info("%s:::获取标书目录的生产者结束", datetime.now())
                break
            self.params_queue.put(row)
            if flag:
                break
        for _ in range(self.thread_num):
            self.params_queue.put(None)

    def screen_shot_product(self, flag=False):
        """
        Screenshot producer: claim award notices ("中标公告") that still
        lack a SnapShot (image_status 0 -> 1) and queue them for
        capture. Pushes one ``None`` sentinel per consumer thread.

        :param flag: when True, stop after claiming a single row (test mode).
        """
        self.m.info('%s:::获取截图的生产者线程开启' % datetime.now())
        err_count = 0
        # Recycle rows whose screenshot upload never completed.
        self.luoyangggzy_data.update_many(
            {"SnapShot": "", "image_status": {"$ne": 2}, "ifbprogress": "中标公告"},
            {"$set": {"image_status": 0}})
        while True:
            try:
                row = self.luoyangggzy_data.find_one_and_update(
                    {"SnapShot": "", "image_status": 0, "ifbprogress": "中标公告"},
                    {"$set": {"image_status": 1}},
                    {"_id": 1, "originalurl": 1, "text_xpath": 1})
            except Exception as err:
                # Give up only after repeated failures (6th error aborts).
                if err_count >= 5:
                    self.m.error('%s:::截图的生产者异常结束 err info: %s' % (datetime.now(), err))
                    break
                err_count += 1
                time.sleep(3)
                continue
            if not row:
                self.m.info('%s:::截图的生产者线程结束' % datetime.now())
                break
            self.screen_queue.put(row)
            if flag:
                break
        for _ in range(self.thread_num):
            self.screen_queue.put(None)

    def run_thread_list(self):
        """Register the producer/consumer callables picked up by BaseSpider.run()."""
        # Producers: run one thread each.
        self.thread_name_list = [
            self.add_menu_producer_queue,   # list-page producer
            self.add_detail_to_queue,       # detail-page producer
            # self.screen_shot_product      # screenshot producer (disabled)
        ]
        # Consumers: run with multiple worker threads.
        self.more_thread_name_list = [
            self.get_menu_producer_consumer,  # list-page consumer
            self.get_detail_consumer,         # detail-page consumer
            # self.screen_shot_consumer       # screenshot consumer (disabled)
        ]

    def run_test(self):
        """
        Manual smoke-test entry point: exercise individual pipeline
        stages one at a time. All stages are left commented out —
        uncomment the one you want to run.
        """
        # Fix: removed a leftover unconditional `pdb.set_trace()` — a live
        # breakpoint here would hang any non-interactive run.
        # self.get_all_type()  # 所有的筛选种类入库
        #
        # # self.add_menu_producer_queue(True)  # 获取列表的生产者
        # # self.get_menu_producer_consumer() # 获取列表的消费者
        #
        # self.add_detail_to_queue(True)  # 获取详情的生产者
        # self.get_detail_consumer()  # 获取详情的消费者
        #
        # self.screen_shot_product(True)  # 获取截图的生产者
        # self.screen_shot_consumer()  # 获取截图的消费者


if __name__ == '__main__':
    # db_name = 'test_gu'
    db_name = 'liuzilong'
    # Fix: don't shadow the class with its own instance
    # (was `Luoyangggzy = Luoyangggzy(db_name)`).
    spider = Luoyangggzy(db_name)
    # spider.get_params()
    spider.run()