#!/usr/bin/python3.8
# -*- coding:UTF-8 -*-

import json
import math
import random
import re
from datetime import datetime, timedelta
import logging
import os
import sys
import threading
import time
from hashlib import md5
from queue import Queue

import requests

BASH_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASH_DIR)
from lxml import etree, html
from pymongo import UpdateOne, ReturnDocument

from tender_project. base_spider import BaseSpider
from tender_project.conf.conf_util import get_all_text, get_file_json, title_strip, upload_image_delete_pic2, send_request, \
    sha256_all_text, clear_html, parse_xpath, judge_xpath
from tender_project.conf.database import DATABASE
from tender_project.conf.dber import MongoDBer
from tender_project.conf.logging_debug import MyLogging


# Batch updates (rows are buffered and written with bulk upserts)
class huaibeiggzy(BaseSpider):
    """Spider for the Huaibei public-resource trading site (ggzy.huaibei.gov.cn).

    Crawls listing pages per category into MongoDB, then fetches and parses
    each detail page via producer/consumer thread queues.
    """

    def __init__(self, db_name):
        # db_name: key into DATABASE used to open the MongoDB connection.
        super(huaibeiggzy, self).__init__()
        self.headers = {
            # "Cookie": "JSESSIONID=325F32A63F535B0ECC371B5881FAAE44; jfe_pin=edb5223c; jfe_ts=1667356652.668; jfe_sn=cj4oWRHJrJdcLhbDnSWPd5qP5hU=",
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36",
 }
        self.params_queue = Queue(maxsize=10)  # category/filter queue (listing producer -> consumer)
        self.detail_queue = Queue(maxsize=100)  # records whose detail page still needs fetching
        self.index_status = True  # whether collection indexes still need to be created
        self.screen_queue = Queue(maxsize=100)  # url queue, used for screenshots
        self.local = threading.local()  # per-thread bulk-write buffers
        self.db_m = MongoDBer(DATABASE[db_name])  # MongoDB database handle
        self.huaibei_param = self.db_m["huaibei_param"]  # per-category crawl state
        self.huaibei_param_err = self.db_m["huaibei_param_err"]  # failed listing URLs
        self.huaibei = self.db_m["huaibei_data"]  # scraped tender documents
        # Candidate XPaths for the article body on detail pages.
        self.xpaths = ["//div[@class='ewb-article-info']", "//div[@class='ewb-project-left']"]
        # Logging
        self.m = MyLogging(self.__class__.__name__, debug_level=logging.INFO, write_level=logging.INFO).logging
        self.create_indexes()  # create collection indexes
        self.myself_ip = self.get_myself_ip()  # this machine's IP, recorded with each row

    def get_params(self):
        """
        Seed huaibei_param with one document per (industry, progress-tag) category.

        Each value is [categoryNum, start page, progress label, industry, area code].
        NOTE(review): "招标项目计划" and "开标信息" both use categoryNum 002001013 —
        confirm against the site whether that duplication is intended.
        """
        param_dict = {
            "工程建设": {
                "招标项目计划": ["002001013", 1, "招标公告", "工程建设",'72'],
                "项目登记": ["002001006", 1, "招标公告", "工程建设",'72'],
                "招标公告": ["002001001", 1, "招标公告", "工程建设",'72'],
                "变更公告": ["002001003", 1, "招标公告", "工程建设",'72'],
                "开标信息": ["002001013", 1, "招标公告", "工程建设",'72'],
                "中标候选人公示": ["002001005", 1, "中标公告", "工程建设",'72'],
                "中标结果公告": ["002001002", 1, "中标公告", "工程建设",'72'],
                "投标保证金退还": ["002001008", 1, "招标公告", "工程建设",'72'],
                "中标通知书": ["002001009", 1, "中标公告", "工程建设",'72'],
                "合同信息": ["002001004", 1, "中标公告", "工程建设",'72'],
                "履约信息": ["002001012", 1, "中标公告", "工程建设",'72'],
                "招标异常": ["002001010", 1, "招标公告", "工程建设",'72']
               },
            "政府采购": {
                "项目登记": ["002002004", 1, "招标公告", "政府采购",'1'],
                "采购公告": ["002002001", 1, "招标公告", "政府采购",'1'],
                "更正公告": ["002002003", 1, "招标公告", "政府采购",'1'],
                "开标记录": ["002002007", 1, "招标公告", "政府采购",'1'],
                "中标（成交）结果公告": ["002002002", 1, "中标公告", "政府采购",'1'],
                "中标通知书": ["002002011", 1, "中标公告", "政府采购", '1'],
                "投标保证金退还": ["002002008", 1, "中标公告", "政府采购", '1'],
                "合同公告": ["002002005", 1, "中标公告", "政府采购", '1'],
                "终止公告": ["002002009", 1, "中标公告", "政府采购", '1'],
            },
            "国有产权": {
                "交易公告": ["002003001", 1, "招标公告", "国有产权", '125'],
                "成交结果公示": ["002003002", 1, "中标公告", "国有产权", '125'],
                "答疑/变更公告": ["002003003", 1, "招标公告", "国有产权", '125'],
                "合同公开": ["002003004", 1, "中标公告", "国有产权", '125'],
            },
            "土地矿权": {
                "交易公告": ["002004001", 1, "招标公告", "土地矿权", '125'],
                "结果公示": ["002004002", 1, "中标公告", "土地矿权", '106']
            },
            "其他交易": {
                "采购意向公开": ["002006005", 1, "采购意向", "采购意向",'213'],
                "交易公告": ["002006001", 1, "招标公告", "其他交易",'213'],
                "答疑/更正公告": ["002006002", 1, "招标公告", "其他交易",'213'],
                "结果公告": ["002006003", 1, "中标公告", "其他交易",'213'],
                "合同公告": ["002006004", 1, "中标公告", "其他交易",'213']
            },

        }
        for industry in param_dict:
            for ifbprogress in param_dict[industry]:
                data_dict = {
                    "ifbprogress": param_dict[industry][ifbprogress][2],
                    "ifbprogresstag": ifbprogress,
                    "industry": param_dict[industry][ifbprogress][3],
                    "channelname": '',
                    "link": param_dict[industry][ifbprogress][0],
                    "page": 1,
                    "status": 0
                }
                # Dedup lookup deliberately excludes page/status so reruns
                # keep the stored crawl progress of existing categories.
                sql_res = self.huaibei_param.find_one({
                    "ifbprogress": param_dict[industry][ifbprogress][2],
                    "ifbprogresstag": ifbprogress,
                    "industry": param_dict[industry][ifbprogress][3],
                    "channelname": '',
                    "link": param_dict[industry][ifbprogress][0],
                })
                if not sql_res:
                    self.huaibei_param.insert_one(data_dict)

    def get_all_number_page(self, res):
        """
        获得总页数
        """
        res_ = json.loads(res)
        all_number_total_ = res_.get('RowCount', '')
        if all_number_total_ != '':
            all_number_page = math.ceil(int(all_number_total_)/12)
        else:
            all_number_page = 1
        return int(all_number_page)


    def parse_data(self, param_res, param_result):
        """
        解析列表页数据
        """
        # 增量标志位
        day_end_flag = False
        title_list = []
        originalurl_list = []
        publishdate_list = []
        ifbunit_list = []
        param_res_xpath = json.loads(param_res)
        param_res_xpath_list = param_res_xpath['Table']
        for result_ in param_res_xpath_list:
            originalurl_ = result_.get('infourl', '')
            if originalurl_ != '':
                originalurl = 'https://ggzy.huaibei.gov.cn' + str(originalurl_)
            else:
                originalurl = ''
            originalurl_list.append(originalurl)
            title_ = result_.get('title', '')
            if title_ != '':
                title = title_.strip().replace('<font color="#FF0000">', '').replace('<html>', '').replace('</html>', '').replace("<font color='#33FF00'>", '').replace('</font>', '').replace("<font color='red'>", '').replace("[报名中]", '').replace("<font color='#FF0000'>", '')
            else:
                title = ''
            print(title)
            title_list.append(title)
            ifbunit_list.append('')
            publishdate_ = result_.get('infodate', '')
            if publishdate_ != '':
                publishdate_1 = publishdate_.strip().replace('年', '-').replace('月', '-').replace('日', '')

            else:
                publishdate_1 = '1970-01-01'
            publishdate = datetime.strptime(publishdate_1,"%Y-%m-%d")
            # 增量更新的时候做判断
            if param_result.get('day_flag'):
                if publishdate < datetime.now() - timedelta(days=70):
                    day_end_flag = True
                    break
            if publishdate_1 == '1970-01-01':
                publishdate = ''
            else:
                publishdate = publishdate
            publishdate_list.append(publishdate)
        return title_list, originalurl_list, publishdate_list, day_end_flag, ifbunit_list

    def remove_js_style(self, response):
        """Strip <script>/<noscript> and <style>/<nostyle> nodes from HTML.

        Returns the cleaned markup as a UTF-8 string, or False when the
        document cannot be parsed/serialized.
        """
        try:
            tree = etree.HTML(response)
            for node in tree.xpath("//script | //noscript"):
                node.getparent().remove(node)
            for node in tree.xpath('//style | //nostyle'):
                node.getparent().remove(node)
            return html.tostring(tree, encoding='utf-8').decode('utf-8')
        except Exception as err:
            print('remove_js_style报错是%s ' % err)
            return False

    def create_indexes(self):
        """Ensure the indexes on huaibei_data exist (guarded to run once).

        Fixes: removed the unused ``html_id_index`` flag and added the
        ``continue`` statements the later branches were missing, making all
        branches consistent.  create_index is idempotent in MongoDB, so the
        probe only saves redundant round-trips.
        NOTE(review): substring matching on index names is fragile — e.g. the
        compound industry index name contains "status" and is caught by the
        status branch first; harmless only because create_index is idempotent.
        """
        if not self.index_status:
            return
        need_url = True
        need_status = True
        need_industry = True
        need_update_time = True
        need_html = True
        for index in self.huaibei.list_indexes():
            name = index["name"]
            if "originalurl" in name:
                need_url = False
                continue
            if "status" in name:
                need_status = False
                continue
            if "industry" in name:
                need_industry = False
                continue
            if "update_time" in name:
                need_update_time = False
                continue
            if "html" in name:
                need_html = False
        if need_url:
            # Unique key: one document per (originalurl, html_id) pair.
            self.huaibei.create_index([("originalurl", 1), ("html_id", 1)], unique=True, background=True)
        if need_status:
            self.huaibei.create_index([("status", 1)], background=True)
        if need_industry:
            self.huaibei.create_index([("SnapShot", 1), ("ifbprogress", 1)],
                                     background=True)
            self.huaibei.create_index(
                [("industry", 1), ("SnapShot", 1), ("ifbprogress", 1), ("image_status", 1)], background=True)
        if need_update_time:
            self.huaibei.create_index([("update_time", 1)], background=True)
        if need_html:
            self.huaibei.create_index([("html", 1)], background=True)
        self.index_status = False

    def md5_url(self, url):
        m = md5()
        m.update(url.encode())
        return m.hexdigest()

    def judge_ifbprogress(self, title):
        if '成交' in title or '结果' in title or '中标' in title or '中标' in title or '废标' in title:
            ifbprogress = '中标公告'
        else:
            ifbprogress = '招标公告'
        return ifbprogress

    def save_data(self, title_list, originalurl_list, publishdate_list, page, param_result, ifbunit_list):
        """Queue one upsert per listing row via insert_data.

        Rows are zipped positionally, so a shorter publishdate_list (see
        parse_data's incremental break) silently drops the trailing rows.

        Fix: removed a leftover debug ``print(title_1)``.
        """
        for title, originalurl, publishdate, ifbunit in zip(
                title_list, originalurl_list, publishdate_list, ifbunit_list):
            # str(datetime)[:4] is the year; the '' sentinel yields ''.
            publishyear = str(publishdate)[:4]
            ctime = datetime.now()
            uuid = ""
            # Categories without a fixed progress label are classified from the title.
            if param_result['ifbprogress'] == '':
                ifbprogress = self.judge_ifbprogress(title)
            else:
                ifbprogress = param_result['ifbprogress']
            # Drop any residual markup prefix (text before the first '>').
            if '>' in title:
                title_1 = str(title).split('>')[1]
            else:
                title_1 = title
            self.insert_data(param_result['industry'], "", ifbprogress, param_result['ifbprogresstag'], param_result['channelname'],
                             '安徽省', '淮北市', '', title_1, '', '',
                             publishdate, publishyear, '', '全国公共资源交易平台（安徽省·淮北市）', originalurl, '',
                             ctime, "", self.myself_ip, "GU", "", page, weather_have_iframe=0,
                             weather_have_image=0, weather_have_pdf=0, weather_have_pdf_type2=0, url_type='html',
                             original_website_id=75, weather_have_blank_url=0, weather_have_enclosure=0,
                             uuid=uuid, image_status=0)

    def insert_data(self, industry, industryv2, ifbprogress, ifbprogresstag, channelname, province, city, county,title,
                    ifbunit, agent,
                    publishdate, publishyear, projectno, sourceplatform,originalurl, tenderaddress,
                    ctime, SnapShot, ip, executor, text_xpath, page, weather_have_iframe,
                    weather_have_image, weather_have_pdf, weather_have_pdf_type2, url_type,
                    original_website_id, weather_have_blank_url, weather_have_enclosure,
                    uuid, image_status):
        """
        Buffer one upsert (keyed on originalurl) into the per-thread batch.

        The batch is flushed to huaibei_data via bulk_write once it reaches
        100 operations.  html / TwoLvTitle / update_time / text_xpath /
        xpath_err / status are filled in later by get_detail_consumer.
        NOTE(review): text_xpath and image_status are accepted but never
        written here — confirm whether that is intentional.
        """
        md5_url = self.md5_url(originalurl)  # local name shadows the method; holds the URL digest
        self.local.insert_data_list.append(UpdateOne(
            {"originalurl": originalurl}, {
                "$set": {
                    "industry": industry, "industryv2": industryv2, "ifbprogress": ifbprogress,
                    "ifbprogresstag": ifbprogresstag, "channelname": channelname,
                    "province": province, "city": city, "county": county, "title": title_strip(title),
                    "publishdate": publishdate, "publishyear": publishyear,
                    "projectno": projectno, "sourceplatform": sourceplatform,
                    "originalurl": originalurl, "md5_originalurl": md5_url, "tenderaddress": tenderaddress,
                    "ctime": ctime, "SnapShot": SnapShot, "ip": ip, "executor": executor,
                    "utime": ctime, "version_num": 1, "agent": agent,
                    "is_parse_html": 0, "ifbunit": ifbunit, "page": page,
                    "weather_have_iframe": weather_have_iframe, "weather_have_image": weather_have_image,
                    "weather_have_pdf": weather_have_pdf, "weather_have_pdf_type2": weather_have_pdf_type2,
                    "url_type": url_type, "original_website_id": original_website_id,
                    "weather_have_enclosure": weather_have_enclosure,
                    "weather_have_blank_url": weather_have_blank_url, "uuid": uuid,
                }
            }, upsert=True
        ))

        # Flush the per-thread buffer every 100 queued operations.
        if len(self.local.insert_data_list) >= 100:
            try:
                self.huaibei.bulk_write(self.local.insert_data_list)
            except Exception as e:
                self.m.error("插入失败%s条数据" % len(self.local.insert_data_list), e)
            else:
                self.m.info("插入成功%s条数据" % len(self.local.insert_data_list))
            finally:
                self.local.insert_data_list.clear()

    def save_param_err(self, param_url, parma_result):
        """Record a failed listing URL in huaibei_param_err, deduplicated on all fields."""
        record = {
            "ifbprogress": parma_result['ifbprogress'],
            "ifbprogresstag": parma_result['ifbprogresstag'],
            "industry": parma_result['industry'],
            "channelname": parma_result['channelname'],
            "status": 0,
            "link": param_url,
        }
        # Insert only when an identical record is not already present.
        if not self.huaibei_param_err.find_one(record):
            self.huaibei_param_err.insert_one(record)

    def parse_xunhuan(self, param_result):
        """
        Page through one category's listing API, saving rows as it goes.

        Full-crawl mode resumes from the stored page number; once the last
        page is passed (or an incremental run hits stale rows) day_flag is
        set and later runs restart at page 1, stopping at rows older than
        ~70 days (see parse_data).
        """
        # Resume logic: restart a full crawl from page 1 when the last full
        # crawl started more than 2 days ago, else continue where we left off.
        if not param_result.get('day_flag'):
            if (datetime.now() - param_result.get('all_start_time', datetime.now() - timedelta(days=2))).days > 2:
                self.huaibei_param.update_one({'_id': param_result['_id']},
                                             {'$set': {'all_start_time': datetime.now()}})
                page = 1
            else:
                page = param_result.get('page', 1)
        else:
            page = 1
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
        }
        url = "https://ggzy.huaibei.gov.cn/EpointWebBuilder/rest/GgSearchAction/getInfoMationList"
        # First request (pageIndex fixed at 1) only serves to read RowCount.
        data = {
            "params": "{\"siteGuid\":\"7eb5f7f1-9041-43ad-8e13-8fcb82ea831a\",\"categoryNum\":\"" + str(param_result[
                                                                                                            'link']) + "\",\"keyword\":\"\",\"startDate\":\"\",\"endDate\":\"\",\"publishDate\":\"\",\"area\":\"\",\"tradeType\":\"\",\"pageIndex\":" + str(
                1) + ",\"pageSize\":12}"
        }
        # NOTE(review): this initial request is outside any try/except — a
        # network failure here propagates to the consumer thread.
        res1 = requests.post(url, headers=headers, data=data, verify=False).content.decode('utf-8')
        all_number_page = self.get_all_number_page(res1)
        while True:
            url = "https://ggzy.huaibei.gov.cn/EpointWebBuilder/rest/GgSearchAction/getInfoMationList"
            data = {
                "params": "{\"siteGuid\":\"7eb5f7f1-9041-43ad-8e13-8fcb82ea831a\",\"categoryNum\":\""+str(param_result['link'])+"\",\"keyword\":\"\",\"startDate\":\"\",\"endDate\":\"\",\"publishDate\":\"\",\"area\":\"\",\"tradeType\":\"\",\"pageIndex\":"+str(page)+",\"pageSize\":12}"
            }

            try:
                param_res = requests.post(url, headers=headers, data=data, verify=False).content.decode('utf-8')
            except Exception as err:
                time.sleep(random.randint(10, 15))
                # Failed URLs could be recorded for later inspection:
                # self.save_param_err(param_url, param_result)
                continue  # NOTE(review): retries the same page forever on persistent failure

            print('总页数是%s' % all_number_page)
            # Past the last page: mark done and switch to incremental mode.
            if page > int(all_number_page):
                self.huaibei_param.update_one({'_id': param_result['_id']},
                                             {'$set': {'page': page, 'day_flag': True,
                                                       'all_start_time': datetime.now()}})
                break
            if param_res:
                title_list, originalurl_list, publishdate_list, day_end_flag, ifbunit_list = self.parse_data(
                    param_res, param_result)

                self.save_data(title_list, originalurl_list, publishdate_list, page, param_result, ifbunit_list)
                if day_end_flag:
                    # Incremental crawl hit rows older than the cutoff: reset to page 1.
                    self.huaibei_param.update_one({'_id': param_result['_id']},
                                                 {'$set': {'day_flag': True, 'page': 1,
                                                           'all_start_time': datetime.now()}})
                    self.m.info('%s 的%s的第%s页增量成功' % (param_result['industry'], param_result['ifbprogresstag'], page))
                    break
                self.m.info('%s 的%s的第%s页获取数据成功' % (param_result['industry'], param_result['ifbprogresstag'], page))
            else:
                print('没有数据')
            # Persist crawl progress so an interrupted run can resume.
            self.huaibei_param.update_one({'_id': param_result['_id']}, {'$set': {'page': page, 'status': 1}})
            page = page + 1
            time.sleep(random.randint(5, 8))
        # Category finished: mark it complete.
        self.huaibei_param.update_one({'_id': param_result['_id']}, {'$set': {'status': 2, }})
        self.m.info('%s 的%s获取数据完毕' % (param_result['industry'], param_result['ifbprogresstag']))

    def get_menu_producer_consumer(self):
        """Consumer thread: crawl listing pages for each queued category.

        A None sentinel ends the loop; any buffered bulk writes are flushed
        before the thread exits.
        """
        self.local.insert_data_list = []  # per-thread bulk-write buffer
        while True:
            param_result = self.params_queue.get()
            if param_result:
                self.parse_xunhuan(param_result)
                continue
            # Sentinel received: flush whatever is still buffered, then stop.
            try:
                self.huaibei.bulk_write(self.local.insert_data_list)
            except Exception as err:
                self.m.error("get_data结束后写入缓存数据失败,原因是:%s" % err)
            break

    def judge_xpath_err(self, detail_result):
        if (detail_result['industry'] == "工程建设" or detail_result['industry'] == "建设工程" or detail_result[
            'industry'] == "政府采购") and detail_result['ifbprogress'] == "中标公告":
            try:
                SnapShot = detail_result["SnapShot"]
            except:
                SnapShot = ""
            if SnapShot:
                image_status = 2
                xpath_err = 0
            else:
                image_status = 0
                xpath_err = 0
        else:
            image_status = 0
            xpath_err = 0
        return image_status, xpath_err

    def get_all_text(self, res, xpath):
        """Hash the Chinese text of the page body selected by `xpath`.

        Returns sha256_all_text(...) of the CJK characters when more than 50
        of them are found, else "" (page considered empty / too short).
        """
        cjk = re.compile("[\u4e00-\u9fa5]")
        body_text = "".join(etree.HTML(res).xpath(xpath))
        chinese_only = "".join(cjk.findall(body_text))
        if chinese_only and len(chinese_only) > 50:
            return sha256_all_text(chinese_only)
        return ""

    def get_all_text1(self, res, xpath):
        """Variant of get_all_text that also keeps ASCII letters and digits.

        Returns sha256_all_text(...) of the matched characters when more than
        50 are found, else "".
        """
        alnum_cjk = re.compile("[\u4e00-\u9fa5a-zA-Z0-9]")
        body_text = "".join(etree.HTML(res).xpath(xpath))
        kept = "".join(alnum_cjk.findall(body_text))
        return sha256_all_text(kept) if len(kept) > 50 else ""

    def judge_xpath(self, res, xpaths):
        """Return the first xpath in `xpaths` that matches content in `res`, else ""."""
        return next((xp for xp in xpaths if parse_xpath(res, xp)), "")

    def get_file_json(self, text_xpath, html_):
        """Collect attachment links under the article body.

        Scans <a href> and <button code> targets whose URL or visible name
        carries a known document extension and returns
        {"files": [{"file_name", "file_url", "file_type"}, ...]}, or "" when
        nothing matched.

        Fixes: removed three leftover debug prints and deduplicated the
        ".zip" membership test, which the original chain repeated three times.
        """
        ext_rule = re.compile(r"\.([^.]*)$")
        doc_exts = (".pdf", ".zip", ".doc", ".rar", ".xlsx")
        tree = etree.HTML(html_)
        file_url_list = tree.xpath(text_xpath + "//a/@href") + tree.xpath(text_xpath + "//button/@code")
        file_name_list = tree.xpath(text_xpath + "//a//text()") + tree.xpath(text_xpath + "//button//text()")
        file_url_real_list = []
        file_name_real_list = []
        for file_url, file_name in zip(file_url_list, file_name_list):
            if any(ext in file_url for ext in doc_exts) or any(ext in file_name for ext in doc_exts):
                # NOTE(review): this host looks copied from another spider
                # (jczx.huaibeifwzx.com) — confirm relative links resolve here.
                file_url_real_list.append('http://jczx.huaibeifwzx.com:80' + file_url)
                file_name_real_list.append(file_name)
        file_type = ["".join(ext_rule.findall(u)) for u in file_url_real_list]
        file_json = {"files": []}
        for n, u, t in zip(file_name_real_list, file_url_real_list, file_type):
            if n and t and u:
                file_json["files"].append({"file_name": n, "file_url": u, "file_type": t})
        return file_json if file_json["files"] else ""

    def get_detail_consumer(self):
        """Consumer thread: download each detail page and store html/TwoLvTitle/utime.

        Pulls records from detail_queue until a None sentinel arrives; buffered
        UpdateOne operations are flushed in batches and once more on exit.

        Fix: removed a leftover debug ``print(detail_res_)`` followed by a
        bare ``raise`` (which raised RuntimeError and killed this thread on
        the very first successful fetch), plus a debug html_id print.
        """
        self.local.get_detail_consumer_list = []
        while True:
            detail_result = self.detail_queue.get()
            time.sleep(random.randint(1, 2))  # polite crawl delay
            if not detail_result:
                # Sentinel received: flush remaining buffered updates and stop.
                try:
                    self.huaibei.bulk_write(self.local.get_detail_consumer_list)
                    self.m.info("get_detail_consumer结束")
                except Exception as e:
                    self.m.error("更新失败%s" % e)
                else:
                    self.m.debug("更新成功%s条" % len(self.local.get_detail_consumer_list))
                break
            try:
                detail_res_ = self.send_rquest_get(detail_result['originalurl'], headers=self.headers)
            except:  # any fetch failure: leave the record for a later pass
                continue
            if detail_res_:
                xpath = judge_xpath(detail_res_, self.xpaths)
                if xpath != '':
                    image_status, xpath_err = self.judge_xpath_err(detail_result)
                else:
                    # Unknown page layout: fail loudly so self.xpaths gets extended
                    # instead of silently storing empty bodies.
                    raise Exception('xpath_err url是%s' % detail_result['originalurl'])
                TwoLvTitle_ = etree.HTML(detail_res_).xpath('//h2/text()')
                TwoLvTitle = TwoLvTitle_[0] if TwoLvTitle_ != [] else ''
                html_id = self.get_all_text(detail_res_, xpath + "//text()")
                # NOTE(review): when html_id == "" nothing is written and the
                # record keeps status 1 forever — confirm whether it should be
                # marked like the empty-body case below.
                if html_id:
                    # Have we already stored this exact body for this URL?
                    detail_res = self.huaibei.find_one(
                        {"originalurl": detail_result['originalurl'], "html_id": html_id})
                    if not detail_res:
                        file_json = self.get_file_json(xpath, detail_res_)
                        self.local.get_detail_consumer_list.append(UpdateOne(
                            {"_id": detail_result["_id"]},
                            {"$set": {
                                "originalurl": detail_result['originalurl'],
                                "html": clear_html(detail_res_),
                                "html_id": html_id,
                                "image_status": image_status,
                                "TwoLvTitle": title_strip(TwoLvTitle),
                                "utime": datetime.now(),
                                "text_xpath": xpath,
                                "xpath_err": 0,
                                "status": 2,
                                "originalurl_data_from": {
                                    "url": detail_result['originalurl'],
                                    "method": "get",
                                    "request_only_data": {},
                                    "response_only_data": {}
                                },
                                "file_json": file_json,
                                "Bid_data_acquisition_format": "HTML",
                            }}
                        ))
                    else:
                        # Duplicate body: close the record without re-storing it.
                        self.huaibei.update_one({"_id": detail_result["_id"]
                                                }, {"$set": {
                            "status": 2,
                            "image_status": 2,
                            "err_txt": "该数据为重复数据，无需上传截图",
                            "xpath_err": 1,
                        }})
            else:
                # Empty response body: close the record with an error marker.
                self.huaibei.update_one({"_id": detail_result["_id"]}, {"$set": {
                    "status": 2,
                    "image_status": 2,
                    "html": "",
                    "err_txt": "正文部分为空",
                    "xpath_err": 1,
                }})
            if len(self.local.get_detail_consumer_list) >= 1:
                try:
                    self.huaibei.bulk_write(self.local.get_detail_consumer_list)
                except Exception as e:
                    self.m.error("更新失败%s" % len(self.local.get_detail_consumer_list), e)
                else:
                    self.m.info("更新成功%s" % len(self.local.get_detail_consumer_list))
                finally:
                    self.local.get_detail_consumer_list.clear()

    def screen_shot_consumer(self):
        """Consumer thread: upload a screenshot for each queued record.

        Stops on the None sentinel; records without a text_xpath are skipped.
        """
        while True:
            result = self.screen_queue.get()
            if not result:
                print("upload_images结束")
                break
            url, rec_id = result["originalurl"], result["_id"]
            text_xpath = result.get("text_xpath")
            if not text_xpath:
                continue
            # NOTE(review): coll_name mentions "hebi" — looks copied from
            # another spider; confirm the target collection.
            upload_image_delete_pic2(url=url, coll_name="lzl_hebi_publish_data", id=rec_id)

    def add_detail_to_queue(self, flag=False):
        """
        Producer: queue every record whose detail page still needs fetching.

        Args:
            flag: when True, enqueue a single record (test mode).
        """
        # Reset records stuck in the "claimed" state (status 1) from a previous run.
        filter_ = {'status': 1}
        update_ = {'$set': {'status': 0}}
        self.huaibei.update_many(filter_, update_)
        i = 0  # consecutive-error counter
        filter_ = {'$or': [{'status': 0}, {'status': {'$exists': False}}]}
        update_ = {'$set': {'status': 1, 'status_time': datetime.now()}}
        proj = {}
        while True:
            try:
                # Atomically claim one pending record (status 0 -> 1).
                data = self.huaibei.find_one_and_update(filter_, update_, proj, return_document=ReturnDocument.AFTER)
                if not data:
                    self.m.info("%s:::获取标书详情的生产者结束", datetime.now())
                    break
                self.detail_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书详情从库中读取数据err info:%s' % (datetime.now(), err))
                i += 1
                time.sleep(3)
                if i >= 10:
                    break
        # One None sentinel per consumer thread so each get_detail_consumer exits.
        for i in range(self.thread_num):
            self.detail_queue.put(None)

    def add_menu_producer_queue(self, flag=False):
        """
        Producer: queue every category document from huaibei_param.

        Args:
            flag: when True, enqueue a single category (test mode).
        """
        # Reset all categories to pending so each run re-crawls everything.
        filter = {'status': {"$ne": 0}}
        update = {'$set': {'status': 0}}
        self.huaibei_param.update_many(filter, update)
        filter_ = {'status': 0}
        update_ = {'$set': {'status': 1}}
        proj = {}
        i = 0  # consecutive-error counter
        while True:
            try:
                # Atomically claim one pending category (status 0 -> 1).
                data = self.huaibei_param.find_one_and_update(filter_, update_, proj,
                                                             return_document=ReturnDocument.AFTER)
                if not data:
                    self.m.info("%s:::获取标书目录的生产者结束", datetime.now())
                    break
                self.params_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书目录从库中读取数据err info:%s' % (datetime.now(), err))
                i += 1
                time.sleep(1)
                if i >= 10:
                    break
        # One None sentinel per consumer thread so each listing consumer exits.
        for i in range(self.thread_num):
            self.params_queue.put(None)

    def screen_shot_product(self, flag=False):

        """ Producer: queue win-notice records that still need a screenshot. """
        self.m.info('%s:::获取截图的生产者线程开启' % datetime.now())
        i = 0  # consecutive-error counter
        # Reset records stuck in the "claimed" state from a previous run.
        self.huaibei.update_many({"SnapShot": "", "image_status": {"$ne": 2}, "ifbprogress": "中标公告"},
                                {"$set": {"image_status": 0}})
        while True:
            try:
                # Atomically claim one record (image_status 0 -> 1).
                one_data = self.huaibei.find_one_and_update(
                    {"SnapShot": "", "image_status": 0, "ifbprogress": "中标公告"},
                    {"$set": {"image_status": 1}},
                    {"_id": 1, "originalurl": 1, "text_xpath": 1})
                if not one_data:
                    self.m.info('%s:::截图的生产者线程结束' % datetime.now())
                    break
                self.screen_queue.put(one_data)
                if flag:
                    # Test mode: enqueue a single record.
                    break
            except Exception as err:
                if i >= 5:
                    self.m.error('%s:::截图的生产者异常结束 err info: %s' % (datetime.now(), err))
                    break
                i += 1
                time.sleep(3)
        # One None sentinel per consumer thread.
        for i in range(self.thread_num):
            self.screen_queue.put(None)

    def run_thread_list(self):
        """Register producer/consumer callables for the base class to start.

        (Presumably BaseSpider.run() starts thread_name_list entries once each
        and more_thread_name_list entries per worker thread — confirm in
        BaseSpider.)
        """
        # self.get_params()

        self.thread_name_list = [
            self.add_menu_producer_queue,  # listing producer
            self.add_detail_to_queue,  # detail producer
            # self.screen_shot_product  # screenshot producer

        ]
        self.more_thread_name_list = [
            self.get_menu_producer_consumer,  # listing consumer
            self.get_detail_consumer,  # detail consumer
            # self.screen_shot_consumer         # screenshot consumer
        ]

    def run_test(self):
        """Manual smoke-test entry point: uncomment the stage to exercise.

        Fix: removed a leftover ``pdb.set_trace()`` debugger hook that halted
        the process whenever this method was called.
        """
        # self.get_all_type()  # seed all filter categories
        #
        # self.add_menu_producer_queue(True)  # listing producer (single item)
        # self.get_menu_producer_consumer()  # listing consumer
        #
        # self.add_detail_to_queue(True)  # detail producer (single item)
        # self.get_detail_consumer()  # detail consumer
        #
        # self.screen_shot_product(True)  # screenshot producer (single item)
        # self.screen_shot_consumer()  # screenshot consumer


if __name__ == '__main__':
    # db_name = 'test_gu'  # test database
    db_name = 'liuzilong'
    # Fix: the instance used to rebind the class name (huaibeiggzy = huaibeiggzy(...)),
    # shadowing the class; use a distinct variable instead.
    spider = huaibeiggzy(db_name)
    spider.get_params()
    spider.run()