#!/usr/bin/python3.8
# -*- coding:UTF-8 -*-
import hashlib
import random
import re
from datetime import datetime, timedelta
import logging
import os
import sys
import threading
import time
import json
from hashlib import md5
from queue import Queue

import requests
from lxml.etree import tostring

BASH_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASH_DIR)
from lxml import etree, html
from pymongo import UpdateOne, ReturnDocument

from base_spider import BaseSpider
from conf.conf_util import title_strip, sha256_all_text, clear_html, parse_xpath, judge_xpath
from conf.database import DATABASE
from conf.dber import MongoDBer
from conf.logging_debug import MyLogging


# Batch updates (rows are upserted into MongoDB in bulk)
class guangdongshengshuilixinyonogW(BaseSpider):
    """Spider for the Guangdong water-conservancy construction-market credit
    platform: crawls listing pages into MongoDB, then fills each row with the
    cleaned detail-page HTML via producer/consumer threads."""

    def __init__(self, db_name):
        """Set up HTTP headers, work queues, the MongoDB collection handle,
        logging and indexes.

        NOTE(review): ``db_name`` is currently ignored — the hard-coded
        'bendi' connection below is used instead of ``DATABASE[db_name]``;
        the ``*_param`` collections referenced by parse_xunhuan* are also
        commented out here — confirm before re-enabling stored crawl state.
        """
        super(guangdongshengshuilixinyonogW, self).__init__()
        self.headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36",
        }
        self.params_queue = Queue(maxsize=10)  # queue of crawl parameters
        self.detail_queue = Queue(maxsize=100)  # queue of rows awaiting detail pages
        self.index_status = True  # whether indexes still need to be created
        self.screen_queue = Queue(maxsize=100)  # queue of URLs for screenshots
        self.local = threading.local()  # per-thread bulk-write buffers
        # self.db_m = MongoDBer(DATABASE[db_name])  # mongodb 库连接对象
        self.db_m1 = MongoDBer(DATABASE['bendi'])  # MongoDB connection object
        # self.db_m1 = MongoDBer(DATABASE['bendi'])  # mongodb 库连接对象
        # self.db_m1 = MongoDBer(DATABASE['test_gu'])  # mongodb 库连接对象
        # self.guangdongshengshuilixinyonog_param = self.db_m["guangdongshengshuilitingjianshexinyong_param"]
        # self.guangdongshengshuilixinyonog_param_err = self.db_m["guangdongshengshuilitingjianshexinyong_param_err"]
        self.guangdongshengshuilixinyonog = self.db_m1["xinyongpingjia_data_detail_new"]
        self.xpaths = ['//div[@class="cxxx"]']  # candidate content xpaths for detail pages
        # logging
        self.m = MyLogging(self.__class__.__name__, debug_level=logging.INFO, write_level=logging.INFO).logging
        self.create_indexes()  # create collection indexes
        self.myself_ip = self.get_myself_ip()  # local machine IP (from BaseSpider)

    def clear_html(self, html_str):
        """Strip page chrome from a detail page's HTML.

        Removes scripts, styles, form inputs, header/footer/navigation
        blocks and images so only the credit-record content remains.

        Args:
            html_str: raw HTML of the detail page.

        Returns:
            The cleaned HTML string.
        """
        from parsel import Selector  # local import: parsel is only needed here
        choice = Selector(html_str.replace('注册地址：', ''))
        # Every selector whose matches are dropped from the document.
        # (The original removed '//nostyle' twice; the duplicate is gone.)
        junk_xpaths = (
            '//script',
            '//style',
            '//noscript',
            '//nostyle',
            '//input',
            '//onclick',
            '//p[@class="timetp"]',
            '//footer[@class="zjcontent"]',
            '//img',
            '//p[@class="footer"]',
            '//div[@class="gongn-box cf"]',
            '//div[@class="header"]',
            '//div[@class="container content"]',
            '//a[@class="leftleft"]',
            '//a[@class="print_blank"]',
        )
        for xp in junk_xpaths:
            choice.xpath(xp).remove()
        return choice.get()

    def get_all_number_page(self, res, param_result):
        """Return the total page count parsed from a listing response.

        Args:
            res: raw response body (HTML or JSON text).
            param_result: crawl-state dict (unused here, kept for interface
                parity with the callers).

        Returns:
            int: number of pages; falls back to 1 when the pager element is
            absent (e.g. JSON endpoints never match the xpath).
        """
        res_ = etree.HTML(res)
        matches = res_.xpath('//div[@id="body_Panel1"]//td[@align="left"]/font[2]/text()')
        if matches:
            # xpath returns a list of text nodes; the original passed the
            # whole list to int(), which raised TypeError whenever the pager
            # actually matched. Use the first text node instead.
            return int(matches[0])
        return 1

    def parse_data(self, param_res, param_result):
        """
        解析列表页数据
        """
        # 增量标志位
        day_end_flag = True
        title_list = []
        originalurl_list = []
        publishdate_list = []
        ifbunit_list = []
        type_list = []
        param_res_xpath = json.loads(param_res)
        param_res_xpath_list = param_res_xpath['result']
        for result_ in param_res_xpath_list:
            originalurl_ = result_.get('id', '')
            if originalurl_ != '':
                originalurl = f'http://210.76.74.108/xydtgl/badBehaviorDetail?id={originalurl_}'
            else:
                originalurl = ''
            originalurl_list.append(originalurl)
            title_ = result_.get('xm_mc', "")
            if title_ != '':
                title = title_
            else:
                title = ''
            print(title)
            title_list.append(title)
            ifbunit_bianhao = result_.get('auditor', '')
            if ifbunit_bianhao != '':
                company_bianhao = ifbunit_bianhao[0].strip().replace('发布人：', '')
            else:
                company_bianhao = ''
            ifbunit_list.append(company_bianhao)
            type_list.append('不良行为')
            publishdate_ = result_.get('create_time', '')
            if publishdate_ != '':
                publishdate_1 = publishdate_[0].strip().replace('发布日期', '').replace('年', '-').replace('月',
                                                                                                           '-').replace(
                    '日', '')

            else:
                publishdate_1 = '1970-01-01'
            try:
                publishdate112 = datetime.strptime(publishdate_1, "%Y-%m-%d")
            except:
                publishdate112 = datetime.strptime('1970-01-01', "%Y-%m-%d")
            if publishdate112 == datetime.strptime('1970-01-01', "%Y-%m-%d"):
                publishdate = ''
            else:
                publishdate = publishdate112
            publishdate_list.append(publishdate)
        return title_list, originalurl_list, publishdate_list, day_end_flag, ifbunit_list, type_list

    def parse_data_1(self, param_res, param_result):
        """
        解析列表页数据
        """
        # 增量标志位
        day_end_flag = True
        title_list = []
        originalurl_list = []
        publishdate_list = []
        ifbunit_list = []
        type_list = []
        param_res_xpath = json.loads(param_res)
        param_res_xpath_list = param_res_xpath['result']
        for result_ in param_res_xpath_list:
            originalurl_ = result_.get('id', '')
            if originalurl_ != '':
                originalurl = f'http://210.76.74.108/xydtgl/badBehaviorDetail?id={originalurl_}'
            else:
                originalurl = ''
            originalurl_list.append(originalurl)
            title_ = result_.get('xm_mc', "")
            if title_ != '':
                title = title_
            else:
                title = ''
            print(title)
            title_list.append(title)
            ifbunit_bianhao = result_.get('auditor', '')
            if ifbunit_bianhao != '':
                company_bianhao = ifbunit_bianhao
            else:
                company_bianhao = ''
            ifbunit_list.append(company_bianhao)
            # type_ = result_.get('fromTable', '')
            # if '失信被执行' in type_:
            #     type = '失信被执行人'
            # elif '重大税收违法' in type_:
            #     type = '重大税收违法失信主体'
            # else:
            #     type = '行政处罚'
            type_list.append('不良行为')
            publishdate_ = result_.get('create_time', '')
            if publishdate_ != '':
                publishdate_1 = publishdate_
            else:
                publishdate_1 = '1970-01-01'
            try:
                publishdate112 = datetime.strptime(publishdate_1, "%Y-%m-%d")
            except:
                publishdate112 = datetime.strptime('1970-01-01', "%Y-%m-%d")
            if publishdate112 == datetime.strptime('1970-01-01', "%Y-%m-%d"):
                publishdate = ''
            else:
                publishdate = publishdate112
            publishdate_list.append(publishdate)
        return title_list, originalurl_list, publishdate_list, day_end_flag, ifbunit_list, type_list

    def remove_js_style(self, response):
        """Strip <script>/<noscript> and <style>/<nostyle> nodes from an HTML
        string and return the re-serialized document.

        Returns:
            str: cleaned HTML, or False when parsing/serialization fails.
        """
        try:
            charset = 'utf-8'
            doc = etree.HTML(response)
            # Drop script-like nodes first, then style-like nodes.
            for node in doc.xpath("//script | //noscript"):
                node.getparent().remove(node)
            for node in doc.xpath('//style | //nostyle'):
                node.getparent().remove(node)
            return html.tostring(doc, encoding=charset).decode(charset)
        except Exception as err:
            print('remove_js_style报错是%s ' % err)
            return False

    def create_indexes(self):
        """Ensure the collection's three indexes exist (unique originalurl+
        html_id, status+sourceplatform, utime); runs once per instance.

        Existing indexes are detected by substring match on the index name,
        so each create_index is skipped if a matching index is already there.
        """
        if self.index_status:
            temp_url_index = True
            temp_status_index = True
            update_time_index = True
            for index in self.guangdongshengshuilixinyonog.list_indexes():
                if "originalurl" in index["name"]:
                    temp_url_index = False
                    continue
                if "status" in index["name"]:
                    temp_status_index = False
                    continue
                if "update_time" in index["name"]:
                    update_time_index = False
                # NOTE(review): the utime index is created with key "utime"
                # but detected by the substring "update_time" — the names may
                # never match, making this create_index run every start-up.

            if temp_url_index:
                # unique compound key used for dedupe of detail documents
                self.guangdongshengshuilixinyonog.create_index([("originalurl", 1), ("html_id", 1)], unique=True,
                                                         background=True)
            if temp_status_index:
                self.guangdongshengshuilixinyonog.create_index([("status", 1), ("sourceplatform", 1)], background=True)
            if update_time_index:
                self.guangdongshengshuilixinyonog.create_index([("utime", 1)], background=True)

            self.index_status = False

    def md5_url(self, url):
        m = md5()
        m.update(url.encode())
        return m.hexdigest()

    def save_data(self, title_list, originalurl_list, publishdate_list, page, param_result, ifbunit_list, type_list):
        """Persist one listing page: upsert every parsed row that carries a
        detail URL; rows without a URL are skipped with a console note."""
        rows = zip(title_list, originalurl_list, publishdate_list, ifbunit_list, type_list)
        for title, originalurl, publishdate, ifbunit, behavior_type in rows:
            if originalurl == '':
                print('无')
                continue
            ctime = datetime.now()
            # province/city/county are unknown at listing time -> empty
            self.insert_data('', '', '', title, publishdate, '广东省水利建设市场信用信息平台',
                             originalurl, ctime, behavior_type)

    def insert_data(self, province, city, county, title, publishdate, sourceplatform, originalurl, ctime,
                    behavior_type):
        """Buffer an upsert (keyed by originalurl) into this thread's batch
        and flush the batch to MongoDB.

        The buffer lives on ``self.local`` (threading.local), so each worker
        thread flushes its own list.

        NOTE(review): the flush threshold ``>= 1`` writes after every single
        append, so the "bulk" buffering is effectively disabled — raise the
        threshold to actually batch.
        """
        self.local.insert_data_list.append(UpdateOne(
            {"originalurl": originalurl}, {
                "$set": {
                    "province": province, "city": city, "county": county, "title": title_strip(title),
                    "publishdate": publishdate,
                    "sourceplatform": sourceplatform,
                    "originalurl": originalurl,
                    "ctime": ctime,
                    "behavior_type": behavior_type,
                    "status": 0,  # 0 = detail page not fetched yet
                    "utime": ctime
                }
            }, upsert=True
        ))

        if len(self.local.insert_data_list) >= 1:
            try:
                self.guangdongshengshuilixinyonog.bulk_write(self.local.insert_data_list)
            except Exception as e:
                self.m.error("插入失败%s条数据" % len(self.local.insert_data_list), e)
            else:
                self.m.info("插入成功%s条数据" % len(self.local.insert_data_list))
            finally:
                # always clear so a failed batch is not retried forever
                self.local.insert_data_list.clear()

    def parse_xunhuan(self, param_result):
        """Page through the creditenergy.gov.cn 'punish' listing endpoint and
        persist every row via parse_data/save_data.

        Args:
            param_result: crawl-state dict; a truthy ``day_flag`` restarts
                from page 1, otherwise the crawl resumes from ``page`` unless
                the last full pass started more than 2 days ago.
        """
        # Decide the starting page from the stored crawl state.
        if not param_result.get('day_flag'):
            if (datetime.now() - param_result.get('all_start_time', datetime.now() - timedelta(days=2))).days > 2:
                # NOTE(review): self.guangdongshengshuilixinyonog_param is
                # never assigned in __init__ (the line is commented out) —
                # reaching this branch raises AttributeError. With the current
                # caller (param_result={}) this branch is never taken; confirm
                # before re-enabling stored crawl state.
                self.guangdongshengshuilixinyonog_param.update_one({'_id': param_result['_id']},
                                                             {'$set': {'all_start_time': datetime.now()}})
                page = 1
            else:
                page = param_result.get('page', 1)
        else:
            page = 1
        while True:
            headers = {
                "Accept": "*/*",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "Connection": "keep-alive",
                "Referer": "http://www.creditenergy.gov.cn/publicity",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
                "X-Requested-With": "XMLHttpRequest"
            }
            url = f"http://www.creditenergy.gov.cn/publicity/page?name=&code=&type=punish&page={str(page)}"
            time.sleep(1)  # throttle: one request per second
            param_res = requests.get(url, headers=headers, verify=False).content.decode('utf-8')
            all_number_page = self.get_all_number_page(param_res, param_result)
            # all_number_page = 10
            print('总页数是%s' % all_number_page)
            # Stop once we have walked past the last page.
            if page > int(all_number_page):
                print('抓取结束')
                break
            if param_res:
                title_list, originalurl_list, publishdate_list, day_end_flag, ifbunit_list, type_list = self.parse_data(
                    param_res, param_result)

                self.save_data(title_list, originalurl_list, publishdate_list, page, param_result, ifbunit_list,
                               type_list)
                self.m.info('第%s页获取数据成功' % (str(page)))
            else:
                print('没有数据')

            page = page + 1
        self.m.info('获取数据完毕')

    def parse_xunhuan_1(self, param_result):
        """Page through the 222.92.61.100 ASP.NET credit portal: GET the first
        page to obtain __VIEWSTATE, then POST the paging postback for each
        subsequent page, persisting rows via parse_data_1/save_data.

        Args:
            param_result: crawl-state dict; same start-page semantics as
                parse_xunhuan.
        """
        # Decide the starting page from the stored crawl state.
        if not param_result.get('day_flag'):
            if (datetime.now() - param_result.get('all_start_time', datetime.now() - timedelta(days=2))).days > 2:
                # NOTE(review): self.guangdongshengshuilixinyonog_param is not
                # assigned in __init__ — this branch would raise AttributeError
                # if reached (the current caller passes {}, so it is not).
                self.guangdongshengshuilixinyonog_param.update_one({'_id': param_result['_id']},
                                                             {'$set': {'all_start_time': datetime.now()}})
                page = 1
            else:
                page = param_result.get('page', 1)
        else:
            page = 1
        # NOTE(review): __VIEWSTATE is only bound when page == 1; if the crawl
        # ever resumes at page > 1, the POST below raises NameError.
        while True:
            if page == 1:
                headers = {
                        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
                        "Accept-Language": "zh-CN,zh;q=0.9",
                        "Cache-Control": "max-age=0",
                        "Connection": "keep-alive",
                        "Upgrade-Insecure-Requests": "1",
                        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
                    }
                url = f"http://222.92.61.100:9091/SzCreditPublicServicePortalPage/Credit.aspx"
                time.sleep(1)  # throttle before the initial GET
                try:
                    param_res = requests.get(url, headers=headers, verify=False).content.decode('utf-8')
                except:
                    time.sleep(15)  # back off and retry the same page
                    continue
                all_number_page = self.get_all_number_page(param_res, param_result)
                # all_number_page = 10
                print('总页数是%s' % all_number_page)
                resp_xpath = etree.HTML(param_res)  # '//input[@id="__VIEWSTATE"]/@value'
                __VIEWSTATE = resp_xpath.xpath('//input[@id="__VIEWSTATE"]/@value')
            headers = {
                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
                    "Accept-Language": "zh-CN,zh;q=0.9",
                    "Cache-Control": "max-age=0",
                    "Connection": "keep-alive",
                    "Upgrade-Insecure-Requests": "1",
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
                }
            # ASP.NET postback payload simulating a click on the pager.
            # NOTE(review): the keys are pre-URL-encoded ("%24" instead of
            # "$"); requests will encode the "%" again, so the server likely
            # sees "%2524..." — verify these keys shouldn't use literal "$".
            data = {
                "__EVENTTARGET": "ctl00$body$gridView1$ctl23$ctl00$ctl03",
                "__EVENTARGUMENT": "",
                "__LASTFOCUS": "",
                "__VIEWSTATE": __VIEWSTATE,
                "ctl00%24body%24dt_Type": "0",
                "ctl00%24body%24DBTextBox7": "",
                "ctl00%24body%24DBTextBox8": "",
                "ctl00%24body%24gridView1%24ctl23%24ctl00%24ctl05": f"{page}",
                "ctl00%24body%24tb_PERMIT_OWNER": "",
                "ctl00%24body%24tb_PROJECT_NAME": "",
                "ctl00%24body%24gridView2%24ctl23%24ctl00%24ctl05": "1",
                "ctl00%24body%24DBTextBox13": "",
                "ctl00%24body%24DBTextBox14": "",
                "ctl00%24body%24gridView4%24ctl23%24ctl00%24ctl05": "1"
            }
            url = f"http://222.92.61.100:9091/SzCreditPublicServicePortalPage/Credit.aspx"
            time.sleep(1)  # throttle before the paging POST
            try:
                param_res = requests.post(url, headers=headers, data=data, verify=False).content.decode('utf-8')
            except:
                time.sleep(15)  # back off and retry the same page
                continue
            all_number_page = self.get_all_number_page(param_res, param_result)
            # all_number_page = 10
            print('总页数是%s' % all_number_page)
            # Stop once we have walked past the last page.
            if page > int(all_number_page):
                print('抓取结束')
                break
            if param_res:
                title_list, originalurl_list, publishdate_list, day_end_flag, ifbunit_list, type_list = self.parse_data_1(
                    param_res, param_result)

                self.save_data(title_list, originalurl_list, publishdate_list, page, param_result, ifbunit_list,
                               type_list)
                self.m.info('第%s页获取数据成功' % (str(page)))
            else:
                print('没有数据')

            page = page + 1
        self.m.info('获取数据完毕')

    def get_menu_producer_consumer(self):
        """Entry point for the creditenergy listing crawl: initialise this
        thread's bulk-insert buffer, then walk every page."""
        self.local.insert_data_list = []  # per-thread buffer for bulk upserts
        self.parse_xunhuan({})

    def get_menu_producer_consumer_1(self):
        """Entry point for the ASP.NET portal listing crawl: initialise this
        thread's bulk-insert buffer, then walk every page."""
        self.local.insert_data_list = []  # per-thread buffer for bulk upserts
        self.parse_xunhuan_1({})

    def judge_xpath_err(self, detail_result):

        image_status = 0
        xpath_err = 0
        return image_status, xpath_err

    def get_all_text(self, res1, test_xpath):
        """Collect the Chinese characters under *test_xpath* and hash them.

        Returns:
            (sha256_id, text) when more than 20 Chinese characters are found,
            otherwise (None, '').
        """
        han_only = re.compile("[\u4e00-\u9fa5]")
        doc = etree.HTML(res1)
        raw = "".join(doc.xpath(test_xpath))
        if not raw:
            return None, ''
        all_text = "".join(han_only.findall(raw))
        if all_text and len(all_text) > 20:
            return sha256_all_text(all_text), all_text
        return None, ''

    def get_all_text_2(self, res1, test_xpath, url):
        """Like get_all_text, but mixes *url* into the hashed text so that
        identical table content from different pages gets distinct ids.

        Returns:
            (sha256_id, mixed_text) when the result exceeds 20 characters,
            otherwise (None, '').
        """
        rules1 = re.compile("[\u4e00-\u9fa5]")
        res_html = etree.HTML(res1)
        res_text = "".join(res_html.xpath(test_xpath))
        print(test_xpath)
        if res_text:
            # NOTE(review): str.join(url) interleaves the Chinese text between
            # the characters of *url* — probably meant `+ url`. The result is
            # still deterministic and url-dependent, and changing it would
            # change every stored sha256_id, so it is left as-is.
            all_text = "".join(rules1.findall(res_text)).join(url)
            if all_text and len(all_text) > 20:
                html_id = self.sha256_all_text(all_text)
                return html_id, all_text
        return None, ''

    def sha256_all_text(self, all_text):
        """
        sha256加密alltext
        """
        sha = hashlib.sha256()
        sha.update(all_text.encode())
        return sha.hexdigest()

    def judge_xpath(self, res, xpaths):
        """Return the first xpath in *xpaths* that matches anything in *res*,
        or '' when none of them do."""
        matching = (candidate for candidate in xpaths if parse_xpath(res, candidate))
        return next(matching, "")

    def get_zhengwen_html(self, html_str, text_xpath):
        """Return the outer HTML (tag + content) of the first node matched by
        *text_xpath* inside *html_str*, or '' when nothing matches or the
        document cannot be parsed/serialized."""
        try:
            res_news = etree.HTML(html_str)
            # [0] raises IndexError when the xpath matches nothing -> ''
            content = res_news.xpath(text_xpath)[0]
            # tostring keeps the matched tag itself, not just its text
            return tostring(content, encoding="utf-8").decode("utf-8")
        except Exception:
            # was a bare except, which also swallowed KeyboardInterrupt /
            # SystemExit; narrowed to Exception
            return ''

    def get_detail_consumer(self):
        """Detail-page consumer: pull rows off detail_queue, download each
        originalurl, extract/clean the content HTML and write it back to
        MongoDB (html / detail_text / html_id / status fields).

        A falsy queue item is the shutdown signal: flush any buffered bulk
        updates and exit the loop.
        """
        self.local.get_detail_consumer_list = []  # per-thread bulk buffer
        while True:
            detail_result = self.detail_queue.get()
            if not detail_result:
                # Shutdown: flush whatever is still buffered, then stop.
                # NOTE: bulk_write raises on an empty buffer; the except
                # below logs and continues to the break either way.
                try:
                    self.guangdongshengshuilixinyonog.bulk_write(self.local.get_detail_consumer_list)
                    self.m.info("get_detail_consumer结束")
                except Exception as e:
                    self.m.error("更新失败%s" % e)
                else:
                    self.m.debug("更新成功%s条" % len(self.local.get_detail_consumer_list))

                break
            else:
                headers = {
                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
                    "Accept-Language": "zh-CN,zh;q=0.9",
                    "Cache-Control": "max-age=0",
                    "Connection": "keep-alive",
                    "Upgrade-Insecure-Requests": "1",
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
                }
                url = detail_result['originalurl']
                try:
                    detail_res_ = requests.get(url, headers=headers, verify=False).content.decode('utf-8')
                except Exception:
                    # Network/decode failure: skip this row for now.
                    continue
                if detail_result['behavior_type'] == '111':
                    # Special case: split the page into one document per
                    # credit table and upsert each one keyed by content hash.
                    res_xpath_ = etree.HTML(detail_res_)
                    res_xpath_list = res_xpath_.xpath('//div[@id="nrid"]/table[@class="table4"]')
                    for res_xpath in res_xpath_list:
                        res_html = tostring(res_xpath, encoding='utf-8').decode("utf-8")
                        xpath = ''  # relative xpath: hash all text in the table
                        res_title = ''  # no per-table title available
                        html_id_1, detail_text_1 = self.get_all_text_2(res_html, xpath + '//text()',
                                                                       detail_result['originalurl'])
                        # Bugfix: '+' concatenation raised TypeError whenever
                        # get_all_text_2 returned html_id_1 = None.
                        print('html_id是%s' % html_id_1)
                        sha256_id = html_id_1
                        if html_id_1:
                            # upsert one finished (status=2) document per table
                            self.guangdongshengshuilixinyonog.update_one({"sha256_id": sha256_id},
                                                                   {"$set": {
                                                                       "originalurl": detail_result['originalurl'],
                                                                       "ctime": datetime.now(),
                                                                       "behavior_type": detail_result['behavior_type'],
                                                                       "detail_text": detail_text_1,
                                                                       "html": self.clear_html(res_html),
                                                                       "html_id": html_id_1,
                                                                       "sha256_id": html_id_1,
                                                                       "originalurl_data_from": {
                                                                           "url": detail_result['originalurl'],
                                                                           "method": "get"},
                                                                       "county": '',
                                                                       "province": '',
                                                                       "publishdate": '',
                                                                       "sourceplatform": '广东省水利建设市场信用信息平台',
                                                                       "title": res_title,
                                                                       "type": "不良",
                                                                       "utime": datetime.now(),
                                                                       "status": 2,
                                                                       "xpath_err": 0,
                                                                       "zhengwen_html": res_html
                                                                   }}, upsert=True)
                    # Drop the original listing row once its tables are stored.
                    self.guangdongshengshuilixinyonog.delete_one({'_id': detail_result['_id']})

                else:
                    if detail_res_:
                        # Find which configured xpath matches this page.
                        xpath = judge_xpath(detail_res_, self.xpaths)
                        if xpath != '':
                            image_status, xpath_err = self.judge_xpath_err(detail_result)
                        else:
                            # no matching xpath -> fall back to whole document
                            image_status, xpath_err = 2, 1
                        html_id, detail_text = self.get_all_text(detail_res_, xpath + "//text()")
                        # Bugfix: '+' concatenation raised TypeError whenever
                        # get_all_text returned html_id = None.
                        print('html_id是%s' % html_id)
                        if html_id:
                            # Only update when this (url, hash) pair is new.
                            detail_res = self.guangdongshengshuilixinyonog.find_one(
                                {"originalurl": detail_result['originalurl'], "html_id": html_id})
                            if not detail_res:
                                self.local.get_detail_consumer_list.append(UpdateOne(
                                    {"_id": detail_result["_id"]},
                                    {"$set": {
                                        "html": clear_html(detail_res_),
                                        "html_id": html_id,
                                        "detail_text": detail_text,
                                        "utime": datetime.now(),
                                        "text_xpath": xpath,
                                        'Bid_data_acquisition_format': 'html',
                                        'previewurl': detail_result['originalurl'],
                                        'two_lv_title': detail_result['title'],
                                        'type': '不良',
                                        # raw source html of the content
                                        'zhengwen_html': detail_res_,
                                        "xpath_err": 0,
                                        "status": 2,
                                    }}
                                ))
                            else:
                                # duplicate content: mark done, skip screenshot
                                self.guangdongshengshuilixinyonog.update_one({"_id": detail_result["_id"]
                                                                        }, {"$set": {
                                    "status": 2,
                                    "err_txt": "该数据为重复数据，无需上传截图",
                                    "xpath_err": 1,
                                    "html": "",
                                }})
                    else:
                        # empty response body: mark the row as failed/done
                        self.guangdongshengshuilixinyonog.update_one({"_id": detail_result["_id"]}, {"$set": {
                            "status": 2,
                            "html": "",
                            "err_txt": "正文部分为空",
                            "xpath_err": 1,
                        }})
                    if len(self.local.get_detail_consumer_list) >= 1:
                        try:
                            self.guangdongshengshuilixinyonog.bulk_write(self.local.get_detail_consumer_list)
                        except Exception as e:
                            self.m.error("更新失败%s" % len(self.local.get_detail_consumer_list), e)
                        else:
                            self.m.info("更新成功%s" % len(self.local.get_detail_consumer_list))
                        finally:
                            self.local.get_detail_consumer_list.clear()

    def add_detail_to_queue(self, flag=False):
        """Producer: atomically claim pending rows (status 0 -> 1) for this
        platform and push them onto detail_queue; finishes by enqueuing one
        None per consumer thread as a shutdown signal.

        Args:
            flag: when True, stop after a single row (debug mode).
        """
        # Reset rows stuck in "claimed" state from a previous run.
        filter_ = {'status': 1}
        update_ = {'$set': {'status': 0}}
        self.guangdongshengshuilixinyonog.update_many(filter_, update_)
        i = 0
        filter_ = {'status': 0, 'sourceplatform': '广东省水利建设市场信用信息平台'}
        update_ = {'$set': {'status': 1, 'status_time': datetime.now()}}
        proj = {}
        while True:
            try:
                # find_one_and_update makes claiming a row atomic across threads
                data = self.guangdongshengshuilixinyonog.find_one_and_update(filter_, update_, proj,
                                                                       return_document=ReturnDocument.AFTER)
                if not data:
                    self.m.info("%s:::获取标书详情的生产者结束", datetime.now())
                    break
                self.detail_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书详情从库中读取数据err info:%s' % (datetime.now(), err))
                i += 1
                time.sleep(3)
                if i >= 10:  # give up after 10 consecutive DB errors
                    break
        # one sentinel per consumer thread (thread_num from BaseSpider —
        # presumably set there; verify)
        for i in range(self.thread_num):
            self.detail_queue.put(None)

    def add_menu_producer_queue(self, flag=False):
        """Producer for listing-crawl parameters: claim rows from the param
        collection (status 0 -> 1) and push them onto params_queue, ending
        with one None sentinel per consumer thread.

        NOTE(review): self.guangdongshengshuilixinyonog_param is never
        assigned in __init__ (the line is commented out), so calling this
        method raises AttributeError; it is also not registered in
        run_thread_list — apparently dead code from the template.

        Args:
            flag: when True, stop after a single row (debug mode).
        """
        # Reset any previously claimed params.
        filter = {'status': {"$ne": 0}}
        update = {'$set': {'status': 0}}
        self.guangdongshengshuilixinyonog_param.update_many(filter, update)
        filter_ = {'status': 0}
        update_ = {'$set': {'status': 1}}
        proj = {}
        i = 0
        while True:
            try:
                data = self.guangdongshengshuilixinyonog_param.find_one_and_update(filter_, update_, proj,
                                                                             return_document=ReturnDocument.AFTER)
                if not data:
                    self.m.info("%s:::获取标书目录的生产者结束", datetime.now())
                    break
                self.params_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书目录从库中读取数据err info:%s' % (datetime.now(), err))
                i += 1
                time.sleep(1)
                if i >= 10:  # give up after 10 consecutive DB errors
                    break
        # one sentinel per consumer thread
        for i in range(self.thread_num):
            self.params_queue.put(None)

    def run_thread_list(self):
        """Run the listing crawl synchronously, then register the producer
        and consumer callables the base class launches as threads."""
        # Listing pages are crawled in the current thread first.
        self.get_menu_producer_consumer_1()
        # Producer: feeds pending rows into the detail queue.
        self.thread_name_list = [self.add_detail_to_queue]
        # Consumers: fetch and persist each detail page.
        self.more_thread_name_list = [self.get_detail_consumer]


if __name__ == '__main__':
    # db_name = 'test_gu'
    db_name = 'liuzilong'
    # db_name = 'bendi'
    # Bind the instance to its own name instead of shadowing the class name
    # (the original rebound guangdongshengshuilixinyonogW to the instance).
    spider = guangdongshengshuilixinyonogW(db_name)
    # spider.get_params()
    spider.proxy_flag = False  # crawl without the proxy pool
    spider.run()
