#!/usr/bin/python3.8
# -*- coding:UTF-8 -*-
"""
@author:    lizhiheng
@date:      2021/12/27
@software:  PyCharm
@file:      lzl_SiChuanxh_publish_data.py
@project:   tender_project
@time:      14:35
@user:      Administrator
"""
import re
from datetime import datetime, timedelta, date
import json
import logging
import os
import sys
import threading
import time
from hashlib import md5
from queue import Queue
import requests
from pymongo.errors import AutoReconnect
from retry import retry

BASH_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASH_DIR)
from lxml import etree, html
from pymongo import UpdateOne, ReturnDocument
# requests.packages.urllib3.disable_warnings()
from base_spider import BaseSpider
from conf.conf_util import get_all_text, get_file_json, title_strip, upload_image_delete_pic2, send_request, \
    sha256_all_text, clear_html, parse_xpath
from conf.database import DATABASE
from conf.dber import MongoDBer
from conf.logging_debug import MyLogging

# Batch update (records are buffered and bulk-upserted to MongoDB)
class SiChuanxh(BaseSpider):
    def __init__(self, db_name):
        """Set up HTTP headers, work queues, MongoDB collections, logging and indexes.

        :param db_name: key into conf.database.DATABASE selecting the Mongo database.
        """
        super(SiChuanxh, self).__init__()
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36",
           "Cookie": "userGuid=1047923792"
            }
        self.params_queue = Queue(maxsize=10)  # queue of filter-param documents (list-page jobs)
        self.detail_queue = Queue(maxsize=100)  # queue of scraped documents awaiting detail fetch
        self.index_status = True  # whether collection indexes still need to be created
        self.screen_queue = Queue(maxsize=100)  # queue of urls to screenshot
        self.local = threading.local()  # per-thread buffers for bulk writes
        self.db_m = MongoDBer(DATABASE[db_name])  # MongoDB connection object
        self.sc_param = self.db_m["lzl_sichuan_publish_xh_param"]  # filter-param collection
        self.sc = self.db_m["lzl_sichuan_publish_xh_data"]  # scraped-data collection
        # Candidate xpaths for the announcement body, tried in order.
        self.xpaths = ["//div[@class='clearfix'][1]", "//div[@class='clearfix xh-highlight'][1]"]
        # logging
        self.m = MyLogging(self.__class__.__name__, debug_level=logging.INFO, write_level=logging.INFO).logging
        self.create_indexes()  # create collection indexes
        self.myself_ip = self.get_myself_ip()  # local machine ip (helper from BaseSpider, presumably -- confirm)

    def get_params(self):
        """
        Seed the parameter collection (sc_param) with one document per
        (industry, announcement-type, city) combination.

        Bug fix: the old duplicate-check query put the progress label under
        "ifbprogresstag" and the announcement-type name under "ifbprogress"
        -- the reverse of what insert_one stores -- so find_one never matched
        an existing document and every run inserted duplicates.  The query is
        now derived directly from the document being inserted.
        """
        param_dict = {
            "代理机构比选": {
                "比选公告": ["比选公告", "002008001", 0, "招标公告"],
                "更正公告": ["更正公告", "002008002", 0, "招标公告"],
                "结果公示": ["结果公示", "002008003", 1, "中标公告"],
                "终止公告": ["终止公告", "002008004", 1, "招标公告"],
                "签约履行": ["签约履行", "002008005", 1, "中标公告"],
            },

            "政府采购": {
                "采购公告": ["采购公告", "002002001", 0, "招标公告"],
                "更正公告": ["更正公告", "002002002", 0, "招标公告"],
                "中标公告": ["中标公告", "002002003", 0, "中标公告"],
                "签约履行": ["签约履行", "002002004", 0, "中标公告"],
                "终止公告": ["终止公告", "002002005", 0, "中标公告"],
            },
            "国有产权": {
                "交易公告": ["交易公告", "002003001", 0, "招标公告"],
                "更正公告": ["更正公告", "002003005", 0, "招标公告"],
                "交易结果": ["交易结果", "002003002", 0, "中标公告"],
                # "开拍视频": ["开拍视频", "002003004", 0, "招标公告"],
                "中止/终止公告": ["中止/终止公告", "002003003", 0, "中标公告"],
            },
            "土地使用权": {
                "出让公示": ["出让公示", "002004001", 0, "招标公告"],
                "公告变更": ["公告变更", "002004002", 0, "招标公告"],
                "成交宗地": ["成交宗地", "002004003", 0, "中标公告"],
                "终止公告": ["终止公告", "002004004", 0, "中标公告"],
            },
            "矿业权": {
                "出让公告": ["出让公示", "002005001", 0, "招标公告"],
                "出让结果": ["公告变更", "002005002", 0, "中标公告"],
                "公开信息": ["成交宗地", "002005003", 0, "招标公告"],
                "登记公告信息": ["终止公告", "002005004", 0, "招标公告"],
                # "正在挂牌": ["成交宗地", "002005006", 0, "中标公告"],
                "终止公告": ["终止公告", "002005005", 0, "中标公告"],
            },
            "药品药械": {
                "公告信息": ["公告信息", "002006001", 0, "招标公告"],
                "交易结果": ["交易结果", "002006002", 0, "中标公告"],
            },
            "其他类别": {
                "公告信息": ["公告信息", "002007001", 0, "招标公告"],
                "交易结果": ["交易结果", "002007002", 0, "中标公告"],
                "终止公告": ["终止公告", "002007003", 0, "招标公告"],
            },
            "工程建设": {
                "招标公告": ["招标公告", "002001001", 0, "招标公告"],
                "资格预审补遗/澄清": ["资格预审补遗", "002001002", 0, "招标公告"],
                "招标文件补遗/澄清": ["招标文件补遗", "002001003", 0, "招标公告"],
                "流标或终止公告": ["流标或终止公告", "002001004", 0, "招标公告"],
                "开标记录": ["开标记录", "002001005", 0, "招标公告"],
                "评标结果公示": ["评标结果公示", "002001006", 0, "中标公告"],
                "中标结果公示": ["中标结果公示", "002001008", 0, "中标公告"],
                "签约履行": ["签约履行", "002001007", 0, "中标公告"],

            }
        }
        city_list = ['四川省', '成都市', '德阳市', '绵阳市', '内江市', '乐山市', '广元市', '眉山市', '自贡市', '雅安市', '宜宾市',
                     '攀枝花市', '泸州市', '遂宁市','广安市', '南充市', '达州市', '资阳市', '巴中市', '阿坝州', '甘孜州', '凉山州', ]
        city_value_list = ['S001', 'S002', 'S003', 'S004', 'S005', 'S006', 'S007', 'S008', 'S009', 'S010', 'S011',
                           'S012', 'S013', 'S014', 'S015', 'S016', 'S017', 'S018', 'S019', 'S020', 'S021', 'S022']

        for industry in param_dict:
            for ifbprogresstag, tag_info in param_dict[industry].items():
                for city, city_value in zip(city_list, city_value_list):
                    data_dict = {
                        "ifbprogress": tag_info[3],   # progress label, e.g. 招标公告 / 中标公告
                        "ifbprogresstag": ifbprogresstag,
                        "equal": tag_info[1],         # categorynum code sent in search requests
                        "industry": industry,
                        "industryv2": "",
                        "industryv2_value": "",
                        "city": city,
                        "city_value": city_value,
                        "page": 1,
                        "status": 0
                    }
                    # Dedup on the identifying fields only (not crawl state),
                    # using the same keys insert_one will store.
                    query = {k: v for k, v in data_dict.items() if k not in ("page", "status")}
                    if not self.sc_param.find_one(query):
                        self.sc_param.insert_one(data_dict)


    def get_all_number_page(self, res):
        """Return the total page count (100 records per page) from a raw
        JSON search response (reads result.totalcount)."""
        payload = json.loads(res)
        total = int(payload['result']['totalcount'])
        pages, remainder = divmod(total, 100)
        return pages + 1 if remainder else pages

    @retry(AutoReconnect, tries=4, delay=1)
    def parse_data(self, param_res, param_result):
        """
        Parse one list-page JSON response into parallel field lists.

        :param param_res: raw JSON text of the list API response.
        :param param_result: filter-param document; when its 'day_flag' is
            set, records older than 'all_start_time' stop the crawl
            (incremental mode).
        :return: (titles, urls, publish dates, day_end_flag, raw records,
                  sources, record ids); day_end_flag is True when an
                  already-crawled publish date was reached.
        """
        day_end_flag = False  # incremental-crawl stop marker
        title_list = []
        originalurl_list = []
        publishdate_list = []
        result_json_list = []
        source_list = []
        param_id_list = []
        records = json.loads(param_res)['result']['records']
        for record in records:
            originalurl_list.append('http://ggzyjy.sc.gov.cn' + record.get('linkurl', ''))
            title_list.append(record.get('title', ''))
            publishdate = datetime.strptime(record.get('infodate', ''), "%Y-%m-%d %H:%M:%S")
            # Incremental update: stop as soon as we reach records older than
            # the previous crawl's start time.
            if param_result.get('day_flag'):
                if publishdate < param_result['all_start_time']:
                    day_end_flag = True
                    break
            publishdate_list.append(publishdate)
            # Source platform. BUG FIX: the old code did
            # record.get('zhuanzai', '')[0], which raises IndexError whenever
            # 'zhuanzai' is missing or empty; fall back to '' instead.
            zhuanzai = record.get('zhuanzai') or ''
            source_list.append(zhuanzai[0] if zhuanzai else '')
            param_id_list.append(record.get('id', ''))
            result_json_list.append(record)
        return title_list, originalurl_list, publishdate_list, day_end_flag, result_json_list, source_list, param_id_list

    def create_indexes(self):
        """
        Ensure the expected indexes exist on the data collection (runs once,
        gated by self.index_status).

        Fixes two defects in the original implementation:
        * Presence was detected by substring-matching index *names*: any
          compound index whose name contains "image_status" also contains
          "status" (suppressing the plain status index), and every "html_id"
          name also matched "html".  Detection now compares the exact tuple
          of indexed fields.
        * The html_id flag was computed but never used; the html_id index
          (queried by get_detail_consumer together with originalurl) is now
          actually created when missing.
        """
        if not self.index_status:
            return
        existing = {tuple(ix["key"].keys()) for ix in self.sc.list_indexes()}

        def _ensure(*fields):
            # Create a background index over *fields* unless an index with
            # exactly these keys (in this order) already exists.
            if tuple(name for name, _ in fields) not in existing:
                self.sc.create_index(list(fields), background=True)

        _ensure(("originalurl", 1))
        _ensure(("status", 1))
        _ensure(("industry", 1), ("SnapShot", 1), ("ifbprogresstag", 1))
        _ensure(("industry", 1), ("SnapShot", 1), ("ifbprogresstag", 1), ("image_status", 1))
        _ensure(("update_time", 1))
        _ensure(("html_id", 1))
        _ensure(("html", 1))
        self.index_status = False

    def md5_url(self, url):
        """Return the hex MD5 digest of *url* (stored as md5_originalurl)."""
        return md5(url.encode()).hexdigest()

    @retry(AutoReconnect, tries=4, delay=1)
    def save_data(self, title_list, originalurl_list, publishdate_list, page, param_result, result_json_list, source_list, param_id_list):
        """
        Hand each parsed row to insert_data for buffered upserting.

        The lists are consumed in lockstep (zip truncates to the shortest,
        which matters when parse_data stopped early on day_end_flag).
        """
        rows = zip(title_list, originalurl_list, publishdate_list,
                   result_json_list, source_list, param_id_list)
        for title, originalurl, publishdate, result_json, source, param_id in rows:
            publishyear = str(publishdate)[:4]
            now = datetime.now()
            # Only prefecture-level cities (市) and autonomous prefectures (州)
            # are recorded as the city; the province row gets ''.
            is_city = '市' in param_result['city'] or '州' in param_result['city']
            city = param_result['city'] if is_city else ''
            self.insert_data(
                param_result['industry'], param_result['industryv2'],
                param_result['ifbprogresstag'], param_result['ifbprogress'],
                "", "四川省", city, title, publishdate, publishyear,
                '', '四川省公共资源交易协会', originalurl, '四川省-' + city,
                now, "", self.myself_ip, "GU", "", "", page,
                weather_have_iframe=0, weather_have_image=0,
                weather_have_pdf=0, weather_have_pdf_type2=0, url_type='api',
                original_website_id=63, weather_have_blank_url=0,
                weather_have_enclosure=0, uuid="", image_status=0,
                result_json=result_json, param_id=param_id)

    @retry(AutoReconnect, tries=4, delay=1)
    def insert_data(self,industry, industryv2, ifbprogresstag, ifbprogress, channelname, province, city, title,
                    publishdate, publishyear, projectno, sourceplatform, originalurl, tenderaddress,
                    ctime, SnapShot, ip, executor, ifbunit, tesc_xpath, page, weather_have_iframe,
                    weather_have_image,weather_have_pdf,weather_have_pdf_type2,url_type,
                    original_website_id, weather_have_blank_url,weather_have_enclosure,
                    uuid, image_status,result_json,param_id):
        """
        Buffer one list-page record as an upsert (keyed on originalurl) in the
        per-thread batch, flushing to MongoDB every 100 records.

        Fields such as html / TwoLvTitle / update_time / tesc_xpath /
        xpath_err / status are filled in later by get_detail_consumer.
        Note: the tesc_xpath parameter is accepted but not stored here.
        """
        md5_url = self.md5_url(originalurl)
        self.local.insert_data_list.append(UpdateOne(
            {"originalurl": originalurl}, {
                "$set": {
                    "industry": industry, "industryv2": industryv2, "ifbprogresstag": ifbprogresstag,
                    "ifbprogress": ifbprogress, "channelname": channelname,
                    "province": province, "city": city, "title": title_strip(title),
                    "publishdate": publishdate, "publishyear": publishyear,
                    "projectno": projectno, "sourceplatform": sourceplatform,
                    "originalurl": originalurl, "md5_originalurl": md5_url, "tenderaddress": tenderaddress,
                    "ctime": ctime, "SnapShot": SnapShot, "ip": ip, "executor": executor,
                    "utime": ctime, "version_num": 1,
                    "is_parse_html": 0, "ifbunit": ifbunit, "page": page,
                    "weather_have_iframe": weather_have_iframe, "weather_have_image": weather_have_image,
                    "weather_have_pdf": weather_have_pdf, "weather_have_pdf_type2": weather_have_pdf_type2,
                    "url_type": url_type, "original_website_id": original_website_id,
                    "weather_have_enclosure": weather_have_enclosure,
                    "weather_have_blank_url": weather_have_blank_url, "uuid": uuid, "result_": result_json,"param_id":param_id
                }
            }, upsert=True
        ))

        # Flush in batches of 100; the buffer is cleared whether or not the
        # bulk write succeeded (a failed batch is logged and dropped).
        if len(self.local.insert_data_list) >= 100:
            try:
                self.sc.bulk_write(self.local.insert_data_list)
            except Exception as e:
                self.m.error("插入失败%s条数据" % len(self.local.insert_data_list), e)
            else:
                self.m.info("插入成功%s条数据" % len(self.local.insert_data_list))
            finally:
                self.local.insert_data_list.clear()

    def get_form_data(self,param_result,page):
        """
        Build the POST payload for the getFullTextData search API.

        :param param_result: filter-param document (city_value / equal /
            industry / industryv2_value).
        :param page: 1-based page number; translated to an offset of
            (page-1)*100 with 100 rows per page.

        Bug fix: the original compared the whole param dict to the string
        '工程建设' (`param_result != '工程建设'`), which is always True, so the
        branch that adds the tradetypevalue condition for 工程建设 was dead
        code.  The comparison now uses param_result['industry'], and the two
        nearly identical payload dicts are merged.
        """
        end_time = str(date.today()) + ' 23:59:59'
        condition = [
            {"fieldName": "tradingsourcevalue", "equal": param_result['city_value'], "notEqual": None,
             "equalList": None,
             "notEqualList": None},
            {"fieldName": "categorynum", "equal": str(param_result['equal']), "notEqual": None,
             "equalList": None,
             "notEqualList": None, "isLike": True, "likeType": 2}]
        if param_result['industry'] == '工程建设':
            # Construction announcements are additionally filtered by trade type.
            condition.append(
                {"fieldName": "tradetypevalue", "equal": param_result['industryv2_value'], "notEqual": None,
                 "equalList": None,
                 "notEqualList": None})
        form_data = {"token": "", "pn": (int(page)-1)*100, "rn": 100, "sdt": "", "edt": "", "wd": "", "inc_wd": "", "exc_wd": "",
                     "fields": "title",
                     "cnum": "", "sort": "{'webdate':'0'}",
                     "ssort": "title", "cl": 500, "terminal": "",
                     "condition": condition,
                     "time": [{"fieldName": "webdate", "startTime": "2001-1-1 00:00:00",
                               "endTime": end_time}],
                     "highlights": "", "statistics": None, "unionCondition": None, "accuracy": "",
                     "noParticiple": "0",
                     "searchRange": None, "isBusiness": "1"}
        return form_data

    @retry(AutoReconnect, tries=4, delay=1)
    def parse_xunhuan(self,param_result):
        """
        Page through the list API for one filter-param document, saving rows
        and persisting crawl progress (page / day_flag / all_start_time) back
        to the param collection.
        """
        # Decide the starting page: a full crawl resumes from the stored page
        # unless the last recorded start time is more than 2 days old;
        # incremental crawls (day_flag set) always restart from page 1.
        if not param_result.get('day_flag'):
            if (datetime.now() - param_result.get('all_start_time', datetime.now() - timedelta(days=2))).days > 2:
                self.sc_param.update_one({'_id': param_result['_id']},
                                          {'$set': {'all_start_time': datetime.now()}})
                page = 1
            else:
                page = param_result.get('page', 1)
        else:
            page = 1
        # Initial request (always page 1) is only used to read the total count.
        form_data = self.get_form_data(param_result, 1)
        param_url = 'http://ggzyjy.sc.gov.cn/inteligentsearch/rest/inteligentSearch/getFullTextData'
        param_res = send_request(method='POST', url=param_url, attr='json', data=form_data, headers=self.headers)
        # total number of pages
        all_number_page = self.get_all_number_page(param_res)
        while True:
            form_data = self.get_form_data(param_result, page)
            param_url = 'http://ggzyjy.sc.gov.cn/inteligentsearch/rest/inteligentSearch/getFullTextData'
            param_res = send_request(method='POST', url=param_url, attr='json', data=form_data, headers=self.headers)
            if page > int(all_number_page):
                # Past the last page: switch this param to incremental mode.
                self.sc_param.update_one({'_id': param_result['_id']},
                                          {'$set': {'page': page, 'day_flag': True,'all_start_time': datetime.now()}})
                break
            if param_res:
                title_list, originalurl_list, publishdate_list, day_end_flag, result_json_list, source_list, param_id_list \
                    = self.parse_data(param_res, param_result)
                self.save_data(title_list, originalurl_list, publishdate_list, page, param_result, result_json_list, source_list, param_id_list)
                if day_end_flag:
                    # Incremental crawl caught up: store the new start time.
                    self.sc_param.update_one({'_id': param_result['_id']},
                                              {'$set': {'day_flag': True, 'page': 1, 'all_start_time': datetime.now()}})
                    self.m.info('%s 的%s的%s的%s的第%s页增量成功' % (param_result['industry'], param_result['ifbprogress'], param_result['city'], param_result['industryv2'], page))
                    break
                self.m.info('%s 的%s的%s的%s的第%s页获取数据成功' % (param_result['industry'], param_result['ifbprogress'], param_result['city'], param_result['industryv2'], page))
            else:
                print('没有数据')
            # Persist the page just crawled so a restart can resume from here.
            self.sc_param.update_one({'_id': param_result['_id']}, {'$set': {'page': page, 'status': 1}})
            page = page + 1
        # Category finished: mark the param document as done.
        self.sc_param.update_one({'_id': param_result['_id']}, {'$set': {'status': 2, }})
        self.m.info('%s 的%s的%s的%s获取数据完毕' % (param_result['industry'], param_result['ifbprogress'], param_result['city'], param_result['industryv2']))

    @retry(AutoReconnect, tries=4, delay=1)
    def get_menu_producer_consumer(self):
        """
        List-page consumer: pull filter-param documents off params_queue and
        crawl each via parse_xunhuan until the None sentinel arrives.

        Bug fix: on shutdown the leftover buffer was flushed unconditionally;
        pymongo's bulk_write raises InvalidOperation on an empty operation
        list, which produced a spurious error log on every clean exit.  The
        flush is now skipped when nothing is buffered.
        """
        self.local.insert_data_list = []  # per-thread buffer shared with insert_data
        while True:
            param_result = self.params_queue.get()
            if not param_result:
                # Sentinel: flush anything still buffered, then exit.
                if self.local.insert_data_list:
                    try:
                        self.sc.bulk_write(self.local.insert_data_list)
                    except Exception as err:
                        self.m.error("get_data结束后写入缓存数据失败,原因是:%s" % err)
                break
            self.parse_xunhuan(param_result)

    def judge_xpath(self,res, xpaths):
        """Return the first xpath in *xpaths* that extracts something from
        *res*; return "" when none of them match."""
        matched = (candidate for candidate in xpaths if parse_xpath(res, candidate))
        return next(matched, "")

    def judge_xpath_err(self,detail_result):
        """Decide (image_status, xpath_err) for one document.

        Winning-bid notices (中标公告) in the construction / procurement
        categories take their flags from the presence of a SnapShot value;
        every other document gets (0, 0).
        """
        snapshot_industries = ("工程建设", "建设工程", "政府采购")
        if detail_result['industry'] in snapshot_industries and detail_result['ifbprogress'] == "中标公告":
            snapshot = detail_result.get("SnapShot", "")
            if snapshot:
                return 2, 0   # already screenshotted, no xpath error
            return 0, 1       # needs a screenshot, flag the xpath
        return 0, 0

    def get_all_text(self,res, xpath):
        """
        Hash the page body: keep only CJK and alphanumeric characters found
        under *xpath*, and return a sha256 id when more than 50 characters
        remain, otherwise "".  (Note: this method shadows the
        conf_util.get_all_text imported at module level.)
        """
        keep_chars = re.compile("[\u4e00-\u9fa5a-zA-Z0-9]")
        tree = etree.HTML(clear_html(res))
        body_text = "".join(tree.xpath(xpath))
        condensed = "".join(keep_chars.findall(body_text))
        if len(condensed) > 50:
            return sha256_all_text(condensed)
        return ""

    @retry(AutoReconnect, tries=4, delay=1)
    def get_detail_consumer(self):
        """
        Detail-page consumer: fetch each queued document's originalurl,
        extract the announcement body, and update the record's html,
        secondary title (TwoLvTitle) and update time.

        Exits when it pulls the None sentinel from detail_queue, flushing any
        buffered updates first.
        """
        self.local.get_detail_consumer_list = []  # per-thread bulk-update buffer
        while True:
            detail_result = self.detail_queue.get()
            if not detail_result:
                # Sentinel: flush remaining buffered updates, then normalise
                # snapshot flags for winning-bid notices and stop.
                try:
                    self.sc.bulk_write(self.local.get_detail_consumer_list)
                    self.m.info("get_detail_consumer结束")
                except Exception as e:
                    self.m.error("更新失败%s" % e)
                else:
                    self.m.debug("更新成功%s条" % len(self.local.get_detail_consumer_list))
                self.sc.update_many({"SnapShot": {"$exists": False}, "ifbprogresstag": "中标公告"},
                                    {"$set": {"SnapShot": "", "image_status": 0}})
                self.sc.update_many({"SnapShot": "", "ifbprogresstag": "中标公告"}, {"$set": {"image_status": 0}})
                break
            else:
                # (sic: "send_rquest_get" is the helper name from BaseSpider)
                detail_res_ = self.send_rquest_get(url=detail_result['originalurl'], headers=self.headers)
                if detail_res_:
                    # Pick the first candidate body xpath that matches this page.
                    xpath = self.judge_xpath(detail_res_, self.xpaths)
                    if xpath != '':
                        image_status, xpath_err = self.judge_xpath_err(detail_result)
                    else:
                        xpath_err = 1
                        image_status = 0
                    TwoLvTitle = detail_result['title']
                    # sha256 of the condensed body text; "" when the body is too short.
                    html_id = self.get_all_text(detail_res_, xpath + "//text()")
                    if html_id:
                        # Check whether this exact body already exists for the url.
                        detail_res = self.sc.find_one({"originalurl": detail_result['originalurl'],"html_id": html_id})
                        if not detail_res:
                            file_json = get_file_json(xpath, detail_res_)
                            self.local.get_detail_consumer_list.append(UpdateOne(
                                {"_id": detail_result["_id"]},
                                {"$set": {
                                    "html": clear_html(detail_res_),
                                    "html_id": html_id,
                                    "image_status": image_status,
                                    "TwoLvTitle": title_strip(TwoLvTitle),
                                    "utime": datetime.now(),
                                    "tesc_xpath": xpath,
                                    "xpath_err": xpath_err,
                                    "status": 2,
                                    "originalurl_data_from": {
                                        "url": detail_result['originalurl'],
                                        "method": "get",
                                        "request_only_data": {},
                                        "response_only_data": {}
                                    },
                                    "file_json": file_json,
                                    "Bid_data_acquisition_format": "HTML",
                                }}
                            ))
                        else:
                            # Duplicate body: mark done and skip screenshotting.
                            self.sc.update_one({"_id": detail_result["_id"]
                                                             }, {"$set": {
                                "status": 2,
                                "image_status": 2,
                                "SnapShot": "该数据为重复数据，无需上传截图",
                                "xpath_err": 1,
                            }})
                    else:
                        # Empty body: mark done with an explanatory snapshot note.
                        self.sc.update_one({"_id": detail_result["_id"]}, {"$set": {
                            "status": 2,
                            "image_status": 2,
                            "html": "",
                            "SnapShot": "正文部分为空",
                            "xpath_err": 1,
                        }})
                # Flush after every buffered document (threshold of 1); the
                # buffer is cleared whether or not the bulk write succeeded.
                if len(self.local.get_detail_consumer_list) >= 1:
                    try:
                        self.sc.bulk_write(self.local.get_detail_consumer_list)
                    except Exception as e:
                        self.m.error("更新失败%s" % len(self.local.get_detail_consumer_list), e)
                    else:
                        self.m.info("更新成功%s" % len(self.local.get_detail_consumer_list))
                    finally:
                        self.local.get_detail_consumer_list.clear()

    @retry(AutoReconnect, tries=4, delay=1)
    def screen_shot_consumer(self):
        """Screenshot consumer: upload a page screenshot for each queued doc.

        Exits on the None sentinel; docs without a usable tesc_xpath are
        skipped.
        """
        while True:
            job = self.screen_queue.get()
            if not job:
                print("upload_images结束")
                break
            url, doc_id = job["originalurl"], job["_id"]
            tesc_xpath = job.get("tesc_xpath")
            if not tesc_xpath:
                continue
            upload_image_delete_pic2(url=url, coll_name="lzl_SiChuanxh_publish_data", id=doc_id)

    @retry(AutoReconnect, tries=4, delay=1)
    def add_detail_to_queue(self,flag=False):
        """
        Detail producer: claim unprocessed documents (status 0 or missing)
        one at a time and push them onto detail_queue for the consumers.

        :param flag: when True, stop after queueing a single document (test mode).
        """
        # Reclaim documents stuck in status 1 for more than 2 minutes
        # (a worker presumably died while holding them -- confirm timeout).
        filter_ = {'status': 1, 'status_time': {'$lt': datetime.now() - timedelta(minutes=2)}}
        update_ = {'$set': {'status': 0}}
        self.sc.update_many(filter_, update_)
        i = 0  # consecutive read-error counter
        filter_ = {'$or': [{'status': 0}, {'status': {'$exists': False}}]}
        update_ = {'$set': {'status': 1, 'status_time': datetime.now()}}
        proj = {}
        while True:
            try:
                # Atomically claim one document (status -> 1) and queue it.
                data = self.sc.find_one_and_update(filter_, update_, proj, return_document=ReturnDocument.AFTER)
                if not data:
                    self.m.info("%s:::获取标书详情的生产者结束", datetime.now())
                    break
                self.detail_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书详情从库中读取数据err info:%s' % (datetime.now(), err))
                i += 1
                time.sleep(3)
                if i >= 10:
                    break
        # One None sentinel per consumer thread.
        for i in range(self.thread_num):
            self.detail_queue.put(None)

    @retry(AutoReconnect, tries=4, delay=1)
    def add_menu_producer_queue(self,flag=False):
        """
        List-page producer: reset every filter-param document to status 0,
        then claim them one at a time (status 0 -> 1) and push each onto
        params_queue for the list-page consumers.

        :param flag: when True, stop after queueing a single document (test mode).
        """
        # Reset all params so every category is re-crawled this run.
        filter = {'status': {"$ne": 0}}
        update = {'$set': {'status': 0}}
        self.sc_param.update_many(filter, update)
        filter_ = {'status': 0}
        update_ = {'$set': {'status': 1}}
        proj = {}
        i = 0  # consecutive read-error counter
        while True:
            try:
                # Atomically claim one param document and queue it.
                data = self.sc_param.find_one_and_update(filter_, update_, proj,
                                                                        return_document=ReturnDocument.AFTER)
                if not data:
                    self.m.info("%s:::获取标书目录的生产者结束", datetime.now())
                    break
                self.params_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书目录从库中读取数据err info:%s' % (datetime.now(), err))
                i += 1
                time.sleep(1)
                if i >= 10:
                    break
        # One None sentinel per consumer thread.
        for i in range(self.thread_num):
            self.params_queue.put(None)

    @retry(AutoReconnect, tries=4, delay=1)
    def screen_shot_product(self,flag=False):

        """ Screenshot producer thread: claim winning-bid documents that
        still need a snapshot (image_status 0) and queue them for the
        screenshot consumers.

        :param flag: when True, stop after queueing a single document (test mode).
        """
        self.m.info('%s:::获取截图的生产者线程开启' % datetime.now())
        i = 0  # consecutive error counter
        # Reset claims: anything not yet marked done (image_status != 2)
        # goes back to 0 so it can be re-claimed.
        self.sc.update_many({"SnapShot": "", "image_status": {"$ne": 2}, "ifbprogresstag": "中标公告"},
                                          {"$set": {"image_status": 0}})
        while True:
            try:
                # Atomically claim one document (image_status 0 -> 1).
                one_data = self.sc.find_one_and_update(
                    {"SnapShot": "", "image_status": 0, "ifbprogresstag": "中标公告"},
                    {"$set": {"image_status": 1}},
                    {"_id": 1, "originalurl": 1, "tesc_xpath": 1})
                if not one_data:
                    self.m.info('%s:::截图的生产者线程结束' % datetime.now())
                    break
                self.screen_queue.put(one_data)
                if flag:
                    break
            except Exception as err:
                if i >= 5:
                    self.m.error('%s:::截图的生产者异常结束 err info: %s' % (datetime.now(), err))
                    break
                i += 1
                time.sleep(3)
        # One None sentinel per consumer thread.
        for i in range(self.thread_num):
            self.screen_queue.put(None)

    def run_thread_list(self):
        """Register producer/consumer callables for BaseSpider's thread
        runner (presumably consumed by the inherited run() -- confirm in
        BaseSpider)."""
        # self.get_params()
        self.thread_name_list = [
            self.add_menu_producer_queue,  # list-page producer
            self.add_detail_to_queue,  # detail-page producer
            # self.screen_shot_product  # screenshot producer
        ]
        self.more_thread_name_list = [
            self.get_menu_producer_consumer,  # list-page consumer
            self.get_detail_consumer,  # detail-page consumer
            # self.screen_shot_consumer         # screenshot consumer
        ]

    def run_test(self):
        """Manual debugging entry point: drops straight into pdb, with the
        individual producer/consumer stages kept below as commented-out
        recipes.

        NOTE(review): the pdb.set_trace() breakpoint will block forever if
        this is ever invoked outside an interactive session.
        """
        import pdb
        pdb.set_trace()
        # self.get_all_type()  # seed all filter categories into the DB
        #
        # # self.add_menu_producer_queue(True)  # list-page producer
        # # self.get_menu_producer_consumer() # list-page consumer
        #
        # self.add_detail_to_queue(True)  # detail-page producer
        # self.get_detail_consumer()  # detail-page consumer
        #
        # self.screen_shot_product(True)  # screenshot producer
        # self.screen_shot_consumer()  # screenshot consumer


if __name__ == '__main__':
    # db_name = 'test_gu'
    db_name = 'liuzilong_db2'  # DATABASE config key (see conf.database)
    sc = SiChuanxh(db_name)
    # sc.get_params()
    # run() is inherited from BaseSpider (not visible in this file) --
    # presumably it drives the thread lists set up by run_thread_list; confirm.
    sc.run()