#!/usr/bin/python3.8
# -*- coding:UTF-8 -*-
"""
@author:    lizhiheng
@date:      2021/12/27
@software:  PyCharm
@file:      lzl_hebi_publish_data.py
@project:   tender_project
@time:      14:35
@user:      Administrator
"""
import hashlib
import json
import random
import re
from datetime import datetime, timedelta
import logging
import os
import sys
import threading
import time
from hashlib import md5
from queue import Queue

import requests
from parsel import Selector
from pymongo.errors import AutoReconnect
from retry import retry

BASH_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(r'F:\code\local_tender_peoject\tender_project')
sys.path.append(BASH_DIR)
from lxml import etree, html
from pymongo import UpdateOne, ReturnDocument

from tender_project. base_spider import BaseSpider

from tender_project.conf.conf_util import   title_strip,   send_request, \
    sha256_all_text, clear_html, parse_xpath, judge_xpath
from tender_project.conf.database import DATABASE
from tender_project.conf.dber import MongoDBer
from tender_project.conf.logging_debug import MyLogging


# Batch-update spider
class changshazfcg(BaseSpider):
    """Spider for the Changsha government procurement site
    (changs.ccgp-hunan.gov.cn).

    Crawls notice listings per category/month window, stores rows in MongoDB
    and tracks crawl progress through a parameter collection.
    """

    def __init__(self, db_name):
        """Set up HTTP headers, work queues, MongoDB handles and logging.

        :param db_name: key into DATABASE selecting the parameter database.
        """
        super(changshazfcg, self).__init__()
        self.headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded",
            "Origin": "http://changs.ccgp-hunan.gov.cn",
            "Referer": "http://changs.ccgp-hunan.gov.cn/gp/noticeSerach.html?basicArea=changsha&articleType=3",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }
        self.params_queue = Queue(maxsize=10)  # filter-parameter queue
        self.detail_queue = Queue(maxsize=100)  # detail-record queue
        self.index_status = True  # whether indexes still need to be created
        self.screen_queue = Queue(maxsize=100)  # url queue used for screenshots
        self.local = threading.local()  # per-thread bulk-write buffers
        self.db_m = MongoDBer(DATABASE[db_name])  # MongoDB connection (params)
        self.db_m1 = MongoDBer(DATABASE['guxulong'])  # MongoDB connection (data)
        self.changshazfcg_param = self.db_m["changshazfcg_param_all"]
        self.changshazfcg_data = self.db_m1["changshazfcg_data"]
        # Candidate xpaths locating the notice body.
        # (The original assigned this list four identical times; once suffices.)
        self.xpaths = ["//body",]
        # logging
        self.m = MyLogging(self.__class__.__name__, debug_level=logging.INFO, write_level=logging.INFO).logging
        self.create_indexes()  # create collection indexes
        self.myself_ip = self.get_myself_ip()  # local machine ip

    def get_params(self):
        """Seed the parameter collection with one document per
        (month window, category) combination.

        Each document carries the site's query fields (categoryId,
        categoryType, date window, channel labels) plus crawl bookkeeping
        (page, status). Existing combinations are skipped, so re-running
        this method is idempotent.
        """
        # Month-window start dates.
        basicDatetime = [
            '2022-09-01', '2022-10-01', '2022-11-01', '2022-12-01', '2023-01-01', '2023-02-01', '2023-03-01','2023-04-01', '2023-05-01','2023-06-01']
        # Matching end dates. NOTE(review): several entries ('2022-09-31',
        # '2022-11-31', '2023-02-31', ...) are not real calendar dates; they
        # are sent verbatim as strings — presumably the server tolerates
        # them, TODO confirm.
        basicDatetimes = [
            '2022-09-31', '2022-10-31', '2022-11-31', '2022-12-31', '2023-01-31', '2023-02-31', '2023-03-31', '2023-04-31', '2023-05-31', '2023-06-31']

        # channel -> {sub-channel: [ifbprogress, categoryId, categoryType, industry]}
        param_dict = {
            "公示公告":{
                # "征求公众意见":["招标公告","217","193","政府采购"],
                "单一来源公示":["招标公告","217","194","政府采购"],
                "意向公开":["采购意向","217","240","采购意向"],
                "进口产品公示":["招标公告","217","241","政府采购"],
                "采购需求公开": ["招标公告","", "901","政府采购"],
                "资金支付公开": ["招标公告","", "902","政府采购"],
            },
            "采购公告":{
                "招标公告": ["招标公告","225","188","政府采购"],
                # "网上竞价":["招标公告","225", "202","政府采购"],
                "非招标公告":["招标公告","225", "218","政府采购"],
                # "资格预审公告":["招标公告","225", "222","政府采购"],
            },
            "澄清(更正)公告":{
                "延期公告":["招标公告","189","210","政府采购"],
                "澄清（更正）公告":["招标公告","189", "280","政府采购"],
            },
            "结果公告":{
                "中标公告":["中标公告","190", "203","政府采购"],
                "成交公告":["中标公告","190", "204","政府采购"],
                # "竞价结果公告":["中标公告","190", "205","政府采购"],
                "废标公告":["中标公告", "190","208","政府采购"],
                "终止公告":["中标公告","190", "219","政府采购"],
                "其他公告":["中标公告","190","223","政府采购"],
            },
            "合同及验收公示":{
                "合同公告":["中标公告","191", "207","政府采购"],
                "验收结果公开":["中标公告","","903","政府采购"],
            }
        }

        for i in range(len(basicDatetime)):
            for industry in param_dict:
                for ifbprogress in param_dict[industry]:
                    # document to insert: query fields + crawl bookkeeping
                    data_dict = {
                        "ifbprogress": param_dict[industry][ifbprogress][0],
                        "ifbprogresstag": ifbprogress,
                        "industry": param_dict[industry][ifbprogress][3],
                        "categoryId":param_dict[industry][ifbprogress][1],
                        "categoryType":param_dict[industry][ifbprogress][2],
                        # "fieldValue":param_dict[industry][ifbprogress][0],
                        'basicDatetime':basicDatetime[i],
                        'basicDatetimes':basicDatetimes[i],
                        "page": 1,
                        "status": 0
                    }
                    # skip combinations already present (idempotent seeding)
                    sql_res = self.changshazfcg_param.find_one({
                        "ifbprogress": param_dict[industry][ifbprogress][0],
                        "ifbprogresstag": ifbprogress,
                        "industry": param_dict[industry][ifbprogress][3],
                        "categoryId": param_dict[industry][ifbprogress][1],
                        "categoryType": param_dict[industry][ifbprogress][2],
                        # "fieldValue": param_dict[industry][ifbprogress][0],
                        'basicDatetime':basicDatetime[i],
                        'basicDatetimes':basicDatetimes[i],
                    })
                    if not sql_res:
                        self.changshazfcg_param.insert_one(data_dict)

    def get_all_number_page(self, param_res, param_result):
        """Return the number of listing pages for a query.

        :param param_res: raw JSON text returned by the getNotice endpoint;
            the count lives at res["list"]["totalCount"].
        :param param_result: filter-parameter document (unused; kept so the
            signature matches the other page handlers).
        :return: page count at 10 records per page, or 1 when the response
            carries no total count.
        """
        res_ = json.loads(param_res)
        all_number_total_ = res_["list"]["totalCount"]
        if all_number_total_ != '':
            # BUG FIX: round UP. The original int(total / 10) dropped the
            # trailing partial page (25 records -> 2 pages instead of 3, and
            # 1-9 records -> 0 pages, so the pager skipped them entirely).
            all_number_page = -(-int(all_number_total_) // 10)
        else:
            all_number_page = 1
        return int(all_number_page)

    @retry(AutoReconnect, tries=10, delay=1)
    def parse_data(self, param_res, param_result):
        """Parse one page of listing JSON into parallel result lists.

        :param param_res: raw JSON text of a getNotice page.
        :param param_result: filter-parameter document; its 'day_flag' turns
            on incremental mode (stop at records older than 30 days).
        :return: (titles, urls, publishdates, day_end_flag, raw_rows) —
            publishdate is '' when the row carried no usable date.
        """
        day_end_flag = False  # set when the incremental cut-off is reached
        title_list = []
        originalurl_list = []
        publishdate_list = []
        result_list = []
        param_res_xpath = json.loads(param_res)
        param_res_xpath_list = param_res_xpath['list']['list']
        for result_ in param_res_xpath_list:
            result_list.append(result_)
            # detail page url is built from the row's basicId
            originalurl_ = result_.get("basicId", '')
            if originalurl_ != '':
                originalurl = 'http://changs.ccgp-hunan.gov.cn/gp/gpcategory/showNew/' + str(originalurl_)
            else:
                originalurl = ''
            originalurl_list.append(originalurl)
            title_ = result_.get("basicTitle", '')
            title = title_.strip() if title_ != '' else ''
            title_list.append(title)
            # BUG FIX: the original tested `publishdate_ != []`, which is
            # always True for a string, so an empty/blank basicDatetime
            # reached strptime and raised ValueError.
            publishdate_ = result_.get("basicDatetime", '')
            publishdate_1 = publishdate_.strip() if publishdate_ else ''
            if not publishdate_1:
                publishdate_1 = '1970-01-01 00:00:00'  # epoch sentinel: no date
            publishdate = datetime.strptime(publishdate_1, "%Y-%m-%d %H:%M:%S")
            # incremental mode: stop once records get older than 30 days
            if param_result.get('day_flag'):
                if publishdate < datetime.now() - timedelta(days=30):
                    day_end_flag = True
                    break
            if publishdate_1 == '1970-01-01 00:00:00':
                publishdate = ''  # sentinel maps back to "no date"
            publishdate_list.append(publishdate)
        return title_list, originalurl_list, publishdate_list, day_end_flag, result_list

    def remove_js_style(self, response):
        """Strip <script>/<noscript> and <style>/<nostyle> nodes from HTML.

        :param response: HTML text to clean.
        :return: cleaned HTML string, or False when parsing fails.
        """
        try:
            charset = 'utf-8'
            tree = etree.HTML(response)
            for node in tree.xpath("//script | //noscript"):
                node.getparent().remove(node)
            for node in tree.xpath('//style | //nostyle'):
                node.getparent().remove(node)
            return html.tostring(tree, encoding=charset).decode(charset)
        except Exception as err:
            print('remove_js_style报错是%s ' % err)
            return False

    @retry(AutoReconnect, tries=10, delay=1)
    def create_indexes(self):
        """Ensure the data collection's indexes exist (runs once per process).

        Existing index names are scanned first; each family's creation flag
        is cleared when a matching name is found so nothing is recreated.
        Guarded by self.index_status so repeated calls are no-ops.
        """
        if self.index_status:
            temp_url_index = True
            temp_status_index = True
            industry_status_index = True
            update_time_index = True
            html_index = True
            # (removed an unused html_id_index flag the original declared)
            for index in self.changshazfcg_data.list_indexes():
                if "originalurl" in index["name"]:
                    temp_url_index = False
                    continue
                if "status" in index["name"]:
                    temp_status_index = False
                    continue
                if "industry" in index["name"]:
                    industry_status_index = False
                if "update_time" in index["name"]:
                    update_time_index = False
                if "html" in index["name"]:
                    html_index = False
            if temp_url_index:
                # uniqueness key: one document per (originalurl, html_id) pair
                self.changshazfcg_data.create_index([("originalurl", 1), ("html_id", 1)], unique=True, background=True)
            if temp_status_index:
                self.changshazfcg_data.create_index([("status", 1)], background=True)
            if industry_status_index:
                self.changshazfcg_data.create_index([("SnapShot", 1), ("ifbprogress", 1)],
                                         background=True)
                self.changshazfcg_data.create_index(
                    [("industry", 1), ("SnapShot", 1), ("ifbprogress", 1), ("image_status", 1)], background=True)
            if update_time_index:
                self.changshazfcg_data.create_index([("update_time", 1)], background=True)
            if html_index:
                self.changshazfcg_data.create_index([("html", 1)], background=True)
            self.index_status = False

    def md5_url(self, url):
        m = md5()
        m.update(url.encode())
        return m.hexdigest()

    @retry(AutoReconnect, tries=10, delay=1)
    def save_data(self, title_list, originalurl_list, publishdate_list, page, param_result, result_list):
        """Buffer one upsert per listing row via insert_data.

        Province/city/source-platform fields are constants for this site;
        the rest comes from the parsed row and the filter document.
        """
        rows = zip(title_list, originalurl_list, publishdate_list, result_list)
        for title, originalurl, publishdate, result_ in rows:
            year = str(publishdate)[:4]  # '' dates yield '' here
            now = datetime.now()
            self.insert_data(
                param_result['industry'], "", param_result['ifbprogress'],
                param_result['ifbprogresstag'], '',
                '湖南省', '长沙市', '', title, '', '',
                publishdate, year, '', '长沙市政府采购网', originalurl, result_, '',
                now, "", self.myself_ip, "L", "", page, weather_have_iframe=0,
                weather_have_image=0, weather_have_pdf=0, weather_have_pdf_type2=0,
                url_type='json', original_website_id=75, weather_have_blank_url=0,
                weather_have_enclosure=0, uuid="", image_status=0)

    @retry(AutoReconnect, tries=10, delay=1)
    def insert_data(self, industry, industryv2, ifbprogress, ifbprogresstag, channelname, province, city, county,title,
                    ifbunit, agent,
                    publishdate, publishyear, projectno, sourceplatform,originalurl,result_, tenderaddress,
                    ctime, SnapShot, ip, executor, text_xpath, page, weather_have_iframe,
                    weather_have_image, weather_have_pdf, weather_have_pdf_type2, url_type,
                    original_website_id, weather_have_blank_url, weather_have_enclosure,
                    uuid, image_status):
        """Buffer one upsert (keyed on originalurl) into this thread's batch.

        The batch lives on self.local (threading.local), so every consumer
        thread flushes its own buffer; a bulk_write is issued once 100
        operations have accumulated. The detail stage later fills in
        html / TwoLvTitle / update_time / text_xpath / xpath_err / status=2.
        """
        md5_url = self.md5_url(originalurl)
        self.local.insert_data_list.append(UpdateOne(
            {"originalurl": originalurl}, {
                "$set": {
                    "industry": industry, "industryv2": industryv2, "ifbprogress": ifbprogress,
                    "ifbprogresstag": ifbprogresstag, "channelname": channelname,
                    "province": province, "city": city, "county": county, "title": title_strip(title),
                    "publishdate": publishdate, "publishyear": publishyear,
                    "projectno": projectno, "sourceplatform": sourceplatform,
                    "originalurl": originalurl, "md5_originalurl": md5_url, "tenderaddress": tenderaddress,
                    "ctime": ctime, "SnapShot": SnapShot, "ip": ip, "executor": executor,
                    "utime": ctime, "version_num": 1, "agent": agent,
                    "ifbunit": ifbunit, "page": page, "result_":result_,
                    "weather_have_iframe": weather_have_iframe, "weather_have_image": weather_have_image,
                    "weather_have_pdf": weather_have_pdf, "weather_have_pdf_type2": weather_have_pdf_type2,
                    "url_type": url_type, "original_website_id": original_website_id,
                    "weather_have_enclosure": weather_have_enclosure,
                    "weather_have_blank_url": weather_have_blank_url, "uuid": uuid,
                }
            }, upsert=True
        ))

        # flush in batches of 100; the buffer is cleared whether or not the
        # bulk write succeeded (failed batches are logged and dropped)
        if len(self.local.insert_data_list) >= 100:
            try:
                self.changshazfcg_data.bulk_write(self.local.insert_data_list)
            except Exception as e:
                self.m.error("插入失败%s条数据" % len(self.local.insert_data_list), e)
            else:
                self.m.info("插入成功%s条数据" % len(self.local.insert_data_list))
            finally:
                self.local.insert_data_list.clear()

    @retry(AutoReconnect, tries=10, delay=1)
    def parse_xunhuan(self, param_result):
        """Page through the listing for one filter-parameter document,
        parsing and buffering every page of rows.

        Resumes from the page persisted in the param document (full crawl)
        or restarts at page 1 (incremental crawl / stale progress).
        """
        # decide the starting page: day_flag means incremental mode
        # (breakpoint-resume handling)
        if not param_result.get('day_flag'):
            # restart from page 1 when the last full crawl began > 2 days ago
            if (datetime.now() - param_result.get('all_start_time', datetime.now() - timedelta(days=2))).days > 2:
                self.changshazfcg_param.update_one({'_id': param_result['_id']},
                                             {'$set': {'all_start_time': datetime.now()}})
                page = 1
            else:
                page = param_result.get('page', 1)
        else:
            page = 1
        url = "http://changs.ccgp-hunan.gov.cn/gp/gpcategory/getNotice"
        # the first request is always page 1 — used only to read totalCount
        data = {
                "page": str(1),
                "limit": "10",
                "sidx": "",
                "order": "",
                "categoryId": param_result['categoryId'],
                "articleType": "0",
                "projid": "",
                "name": "",
                "cgType": "0",
                "buyerNm": "",
                "buyerOrgNm": "",
                "supplyNm": "",
                "basicType": "1",
                "basicDatetime": param_result['basicDatetime'],
                "basicDatetimes": param_result['basicDatetimes'],
                "type": "0",
                "basicArea": "0",
                "categoryType": str(param_result['categoryType'])
}
        # data = json.dumps(data, separators=(',', ':'))
        print(data)
        param_res = requests.post(url=url, headers=self.headers, data=data).content.decode('utf-8')
        print('**************%s' % param_res)
        all_number_page = self.get_all_number_page(param_res, param_result)
        print('总页数是%s' % all_number_page)
        while True:
            # loop exit: past the last page -> persist progress, set day_flag
            if page > int(all_number_page):
                # if page > int(5):
                self.changshazfcg_param.update_one({'_id': param_result['_id']},
                                                   {'$set': {'page': page, 'day_flag': True,
                                                             'all_start_time': datetime.now()}})
                break
            url = "http://changs.ccgp-hunan.gov.cn/gp/gpcategory/getNotice"
            data = {
                "page": str(page),
                "limit": "10",
                "sidx": "",
                "order": "",
                "categoryId": param_result['categoryId'],
                "articleType": "0",
                "projid": "",
                "name": "",
                "cgType": "0",
                "buyerNm": "",
                "buyerOrgNm": "",
                "supplyNm": "",
                "basicType": "1",
                "basicDatetime": param_result['basicDatetime'],
                "basicDatetimes": param_result['basicDatetimes'],
                "type": "0",
                "basicArea": "0",
                "categoryType": str(param_result['categoryType'])
}
            try:
                param_res = requests.post(url=url, headers=self.headers, data=data).content.decode('utf-8')

                if param_res:
                    title_list, originalurl_list, publishdate_list, day_end_flag, result_list = self.parse_data(
                        param_res, param_result)

                    self.save_data(title_list, originalurl_list, publishdate_list, page, param_result, result_list)
                    if day_end_flag:
                        # incremental cut-off reached: reset page and record
                        # the new crawl start time in the param document
                        self.changshazfcg_param.update_one({'_id': param_result['_id']},
                                                     {'$set': {'day_flag': True, 'page': 1,
                                                               'all_start_time': datetime.now()}})
                        self.m.info('%s 的%s的第%s页增量成功' % (param_result['industry'], param_result['ifbprogresstag'], page))
                        break
                    self.m.info('%s 的%s的第%s页获取数据成功' % (param_result['industry'], param_result['ifbprogresstag'], page))
            except Exception as err:
                # NOTE(review): the error is swallowed (blank print) and the
                # same page is retried after a pause — a persistent failure
                # loops here indefinitely. Consider logging err and bounding
                # the retries.
                print()
                time.sleep(random.randint(10, 15))
                continue
            else:
                # NOTE(review): this `else` runs on every successful request,
                # so '没有数据' ("no data") is printed even when data arrived —
                # looks inverted; confirm the intent.
                print('没有数据')
            # persist the page reached so a restart can resume here
            self.changshazfcg_param.update_one({'_id': param_result['_id']}, {'$set': {'page': page, 'status': 1}})
            page = page + 1
            time.sleep(random.randint(1, 2))
        # category finished: mark the param document done (status=2)
        self.changshazfcg_param.update_one({'_id': param_result['_id']}, {'$set': {'status': 2, }})
        self.m.info('%s 的%s获取数据完毕' % (param_result['industry'], param_result['ifbprogresstag']))

    @retry(AutoReconnect, tries=10, delay=1)
    def get_menu_producer_consumer(self):
        """Consumer: pull filter-parameter docs off params_queue and crawl
        their listings until a None sentinel arrives.

        Flushes this thread's pending bulk writes before exiting.
        """
        self.local.insert_data_list = []  # per-thread bulk-insert buffer
        while True:
            param_result = self.params_queue.get()
            if param_result:
                self.parse_xunhuan(param_result)
                continue
            # sentinel received: flush whatever remains in the buffer, stop
            try:
                self.changshazfcg_data.bulk_write(self.local.insert_data_list)
            except Exception as err:
                self.m.error("get_data结束后写入缓存数据失败,原因是:%s" % err)
            break

    def judge_xpath_err(self, detail_result):
        if (detail_result['industry'] == "工程建设" or detail_result['industry'] == "建设工程" or detail_result[
            'industry'] == "政府采购") and detail_result['ifbprogress'] == "中标公告":
            try:
                SnapShot = detail_result["SnapShot"]
            except:
                SnapShot = ""
            if SnapShot:
                image_status = 2
                xpath_err = 0
            else:
                image_status = 0
                xpath_err = 0
        else:
            image_status = 0
            xpath_err = 0
        return image_status, xpath_err

    def get_all_text(self, res1, test_xpath, title, publishdate):
        """Hash the Chinese text content of a page.

        :return: sha256 id of the CJK characters found under *test_xpath*
            when there are more than 20 of them and both title and
            publishdate are non-empty; otherwise None.
        """
        han_pattern = re.compile("[\u4e00-\u9fa5]")
        doc = etree.HTML(res1)
        extracted = "".join(doc.xpath(test_xpath))
        if not extracted:
            return None
        chinese_only = "".join(han_pattern.findall(extracted))
        if chinese_only and len(chinese_only) > 20 and title != '' and publishdate != '':
            return sha256_all_text(chinese_only)
        return None

    def sha256_all_text(self, all_text):
        """
        sha256加密alltext
        """
        sha = hashlib.sha256()
        sha.update(all_text.encode())
        return sha.hexdigest()

    def judge_xpath(self, res, xpaths):
        """Return the first xpath in *xpaths* that matches *res*, else ""."""
        return next((xp for xp in xpaths if parse_xpath(res, xp)), "")

    def get_file_json(self, text_xpath, html_):
        """Collect attachment links found under *text_xpath* into a dict.

        Scans <a href> and <button code> targets plus their visible text and
        keeps pairs where either side contains a known document extension.

        :return: {"files": [{"file_name", "file_url", "file_type"}, ...]}
                 or "" when nothing matched.
        """
        ext_rule = re.compile(r"\.([^.]*)$")  # text after the last dot
        # substring (not suffix) test, preserving the original behaviour;
        # the original repeated ".zip" twice in each clause — deduplicated
        doc_exts = (".pdf", ".zip", ".doc", ".rar", ".xlsx")
        res = etree.HTML(html_)
        file_url_list = (res.xpath(text_xpath + "//a/@href")
                         + res.xpath(text_xpath + "//button/@code"))
        # NOTE(review): urls and names are paired positionally by zip —
        # assumes every link/button yields exactly one text node; confirm.
        file_name_list = (res.xpath(text_xpath + "//a//text()")
                          + res.xpath(text_xpath + "//button//text()"))
        file_url_real_list = []
        file_name_real_list = []
        for file_url, file_name in zip(file_url_list, file_name_list):
            if any(ext in file_url for ext in doc_exts) or any(ext in file_name for ext in doc_exts):
                file_url_real_list.append(file_url)
                file_name_real_list.append(file_name)
        file_type = ["".join(ext_rule.findall(u)) for u in file_url_real_list]
        file_json = {"files": []}
        for n, u, t in zip(file_name_real_list, file_url_real_list, file_type):
            if n and t and u:
                file_json["files"].append({"file_name": n, "file_url": u, "file_type": t})
        if file_json == {"files": []}:
            file_json = ""
        return file_json

    def clear_html(self, html_str):
        """Return *html_str* with script/style/noscript nodes removed."""
        selector = Selector(html_str)
        for node_xpath in ('//script', '//style', '//noscript'):
            selector.xpath(node_xpath).remove()
        return selector.get()

    @retry(AutoReconnect, tries=10, delay=1)
    def get_detail_consumer(self):
        """Consumer: fetch each claimed record's detail page and update its
        html / TwoLvTitle / utime (and related) fields.

        Pulls records from detail_queue until a None sentinel arrives;
        buffers UpdateOne operations per thread and flushes via bulk_write.
        """
        self.local.get_detail_consumer_list = []  # per-thread update buffer
        while True:
            detail_result = self.detail_queue.get()
            # print(detail_result['originalurl'])
            time.sleep(random.randint(1,3))  # throttle detail requests
            if not detail_result:
                # sentinel received: flush any buffered updates and exit
                try:
                    self.changshazfcg_data.bulk_write(self.local.get_detail_consumer_list)
                    self.m.info("get_detail_consumer结束")
                except Exception as e:
                    self.m.error("更新失败%s" % e)
                else:
                    self.m.debug("更新成功%s条" % len(self.local.get_detail_consumer_list))
                break
            else:
                print('开始更新    '+ detail_result['originalurl'])
                headers = {
                    "Accept": "application/json, text/javascript, */*; q=0.01",
                    "Accept-Language": "zh-CN,zh;q=0.9",
                    "Connection": "keep-alive",
                    "Content-Length": "0",
                    "Origin": "http://changs.ccgp-hunan.gov.cn",
                    "Referer": "http://changs.ccgp-hunan.gov.cn/gp/showNotice.html?basicId=281735&articleType=2&basicArea=wangcheng",
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
                    "X-Requested-With": "XMLHttpRequest"
                }
                requests_url = detail_result['originalurl']
                # detail endpoint is POSTed with an empty body and returns JSON
                detail_result_ = requests.post(requests_url, headers=headers,   verify=False).content.decode('utf-8')


                if '采购意向' in detail_result['ifbprogresstag']:
                    not_parse_html = 0
                else:
                    not_parse_html = ''
                if detail_result_:

                    # notice body lives at new.new.articleContent in the JSON
                    try:
                        detail_res_ = json.loads(detail_result_)['new']['new']['articleContent']
                    except:
                        print('报错了')
                        print(detail_result['originalurl'])
                        continue
                    try:
                        xpath = judge_xpath(detail_res_, self.xpaths)
                    except:
                        xpath = ''
                    if xpath != '':
                        image_status, xpath_err = self.judge_xpath_err(detail_result)
                    else:
                        continue
                        # raise Exception('xpath_err url是%s' % detail_result['info_url'])
                    TwoLvTitle = json.loads(detail_result_)['new']['new']['noticeTitle']
                    html_id = self.get_all_text(detail_res_, "//text()", detail_result['title'], detail_result['publishdate'])
                    print('html_id是')
                    print(html_id)
                    # get_all_text returns None when no usable body text was
                    # found; concatenating None raises TypeError, which this
                    # try/except uses to skip the record
                    try:
                        print('html_id是'+html_id)
                    except:
                        continue
                    if html_id:
                        # check whether this (url, html_id) pair already exists
                        detail_res = self.changshazfcg_data.find_one(
                            {"originalurl": detail_result['originalurl'], "html_id": html_id})
                        if not detail_res:
                            file_json = self.get_file_json(xpath, detail_res_)
                            self.local.get_detail_consumer_list.append(UpdateOne(
                                {"_id": detail_result["_id"]},
                                {"$set": {
                                    "originalurl": detail_result['originalurl'],
                                    # "html": self.clear_html(detail_res_),
                                    "html": detail_res_,
                                    "html_id": html_id,
                                    "image_status": image_status,
                                    "TwoLvTitle": title_strip(TwoLvTitle),
                                    "utime": datetime.now(),
                                    "text_xpath": xpath,
                                    # NOTE(review): conditional dict KEY — writes
                                    # not_parse_html=0 for 采购意向 records and
                                    # is_parse_html=0 otherwise; confirm this is
                                    # intended rather than a value-level flag.
                                    "not_parse_html" if not_parse_html == 0 else 'is_parse_html': 0,
                                    "xpath_err": xpath_err,
                                    "status": 2,
                                    "originalurl_data_from": {
                                        "url": detail_result['originalurl'],
                                        "method": "get",
                                        "request_only_data": {},
                                        "response_only_data": {}
                                    },
                                    "file_json": file_json,
                                    "Bid_data_acquisition_format": "HTML",
                                }}
                            ))
                        else:
                            # duplicate content: mark done, no screenshot needed
                            self.changshazfcg_data.update_one({"_id": detail_result["_id"]
                                                    }, {"$set": {
                                "status": 2,
                                "image_status": 2,
                                "err_txt": "该数据为重复数据，无需上传截图",
                                "xpath_err": 1,
                            }})
                    else:
                        # no body text extracted for this record
                        self.changshazfcg_data.update_one({"_id": detail_result["_id"]
                                                            }, {"$set": {
                            "status": 2,
                            "image_status": 2,
                            "err_txt": "没有正文",
                            "xpath_err": 1,
                        }})

                else:
                    # HTTP response body was empty
                    self.changshazfcg_data.update_one({"_id": detail_result["_id"]}, {"$set": {
                        "status": 2,
                        "image_status": 2,
                        "html": "",
                        "err_txt": "正文部分为空",
                        "xpath_err": 1,
                    }})
                # threshold of 1 means the buffer is flushed every iteration
                if len(self.local.get_detail_consumer_list) >= 1:
                    try:
                        self.changshazfcg_data.bulk_write(self.local.get_detail_consumer_list)
                    except Exception as e:
                        self.m.error("更新失败%s" % len(self.local.get_detail_consumer_list), e)
                    else:
                        self.m.info("更新成功%s" % len(self.local.get_detail_consumer_list))
                    finally:
                        self.local.get_detail_consumer_list.clear()


    def add_detail_to_queue(self, flag=False):
        """Producer: feed records that still need a detail page into
        detail_queue.

        First releases rows a previous run left claimed (status 1 -> 0),
        then claims rows one at a time (status 0 -> 1) so concurrent
        producers do not collide. Pushes one None sentinel per consumer
        thread when done.

        :param flag: when True, stop after a single record (debug aid).
        """
        self.changshazfcg_data.update_many({'status': 1}, {'$set': {'status': 0}})
        claim_filter = {'$or': [{'status': 0}, {'status': {'$exists': False}}]}
        claim_update = {'$set': {'status': 1, 'status_time': datetime.now()}}
        errors = 0
        while True:
            try:
                record = self.changshazfcg_data.find_one_and_update(
                    claim_filter, claim_update, {}, return_document=ReturnDocument.AFTER)
                if not record:
                    self.m.info("%s:::获取标书详情的生产者结束", datetime.now())
                    break
                self.detail_queue.put(record)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书详情从库中读取数据err info:%s' % (datetime.now(), err))
                errors += 1
                time.sleep(3)
                if errors >= 10:
                    break
        # one sentinel per consumer thread
        for _ in range(self.thread_num):
            self.detail_queue.put(None)

    @retry(AutoReconnect, tries=10, delay=1)
    def add_menu_producer_queue(self, flag=False):
        """Producer: feed filter-parameter documents into params_queue.

        Resets every param row to status 0, then claims rows one at a time
        (status 0 -> 1), sleeping 3-5s between claims to throttle. Pushes
        one None sentinel per consumer thread when done.

        :param flag: when True, stop after a single document (debug aid).
        """
        self.changshazfcg_param.update_many({'status': {"$ne": 0}}, {'$set': {'status': 0}})
        claim_filter = {'status': 0}
        claim_update = {'$set': {'status': 1}}
        errors = 0
        while True:
            try:
                doc = self.changshazfcg_param.find_one_and_update(
                    claim_filter, claim_update, {}, return_document=ReturnDocument.AFTER)
                time.sleep(random.randint(3, 5))
                if not doc:
                    self.m.info("%s:::获取标书目录的生产者结束", datetime.now())
                    break
                self.params_queue.put(doc)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书目录从库中读取数据err info:%s' % (datetime.now(), err))
                errors += 1
                time.sleep(1)
                if errors >= 10:
                    break
        # one sentinel per consumer thread
        for _ in range(self.thread_num):
            self.params_queue.put(None)


    def run_thread_list(self):
        """Register producer/consumer callables for BaseSpider's runner.

        Seeds the parameter collection first, then exposes the list-page
        producer and its consumers (the detail stage is currently disabled).
        """
        self.get_params()
        # single-threaded producers
        self.thread_name_list = [
            self.add_menu_producer_queue,  # list-page producer
            # self.add_detail_to_queue,  # detail producer (disabled)
        ]
        # multi-threaded consumers
        self.more_thread_name_list = [
            self.get_menu_producer_consumer,  # list-page consumer
            # self.get_detail_consumer,  # detail consumer (disabled)
        ]



if __name__ == '__main__':
    # db_name = 'test_gu'
    db_name = 'liuzilong'  # DATABASE key selecting the MongoDB connection
    # NOTE(review): rebinding the class name to the instance shadows the
    # class in this scope; harmless here but confusing.
    changshazfcg = changshazfcg(db_name)
    # changshazfcg.get_params()
    changshazfcg.run()  # run() presumably comes from BaseSpider — confirm
