#!/usr/bin/python3.8
# -*- coding:UTF-8 -*-
"""
@author:    guxulong
@date:      2022/7/21
@software:  PyCharm
@file:      quanguoliaoning.py
@project:   tender_project
@time:      14:35
@user:      Administrator
"""

import logging
import os
import random
import sys
import threading
import time
from datetime import date, datetime, timedelta
from hashlib import md5
from queue import Queue

import requests
from bson import ObjectId
from fake_useragent import UserAgent
from lxml import etree, html
from pymongo import UpdateOne, ReturnDocument
from requests import request

BASH_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASH_DIR)

from base_spider import BaseSpider
from conf.database import DATABASE
from conf.dber import MongoDBer
from conf.logging_debug import MyLogging
from conf.conf_util import title_strip, get_file_json, get_all_text, upload_image_delete_pic2, \
    send_request, originalurl_data_from, remove_js_style, clear_html


class LiaoNing(BaseSpider):
    def __init__(self, db_name):
        super(LiaoNing, self).__init__()
        self.url = "http://www.lnggzy.gov.cn/lnggzy/showinfo/jyxxsearch.aspx"
        self.headers = {
            "Cookie": "ASP.NET_SessionId=ci0vfxaaunr2mrziagzw4h45",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36",
        }
        self.params_queue = Queue(maxsize=10)  # 筛选队列
        self.detail_queue = Queue(maxsize=1000)  # 数据队列
        self.index_status = True  # 索引是否添加
        self.screen_queue = Queue(maxsize=1000)  # url队列  用来截图
        self.local = threading.local()
        self.db_m = MongoDBer(DATABASE[db_name])  # mongodb 库连接对象
        self.quanguo_liaoning_params = self.db_m["quanguo_liaoning_params"]
        self.quanguo_liaoning = self.db_m["quanguo_liaoning"]
        self.m = MyLogging(self.__class__.__name__, debug_level=logging.INFO, write_level=logging.INFO).logging
        self.text_xpath = {
            "政府采购采购/预审公告": "zfcg_zbgg1_cont",
            "政府采购变更公告": "zfcg_bggg1_cont",
            "政府采购采购合同公示": "zfcg_htgg1_cont",
            "政府采购中标结果公示": "zfcg_zbgs1_cont",
            "建设工程招标、资审公告": "jsgc_zbgg1_cont",
            "建设工程中标候选人公示": "jsgc_hxr1_cont",
            "建设工程中标结果公示": "jsgc_zbgs1_cont",
            "国有产权交易公告": "cqjy_jygg1_cont",
            "国有产权成交公告": "cqjy_cjgg1_cont",
            "土地/矿产权交易公告": "tdjy_jygg1_cont",
            "土地/矿产权成交公告": "tdjy_cjgg1_cont",
        }
        self.create_indexes()  # 创建表索引
        self.myself_ip = self.get_myself_ip()

    def get_params(self):
        """
        获取industry 和 ifbprogresstag信息的流程
        """
        industry_text_list = ["政府采购", "建设工程", "国有产权", "土地/矿产权"]
        industry_value_list = ["001", "002", "003", "004"]
        for industry_text, industry_value in zip(industry_text_list, industry_value_list):
            # 判断数据库中是否有这个链接
            url_flag = self.quanguo_liaoning_params.find_one(
                {'industry_text': industry_text, 'industry_url': industry_value})
            result_list = self.generate_url(industry_value)
            if url_flag is None:
                for result in result_list:
                    self.insert_params(industry_text, industry_value, result['url'], result['begin_date'],
                                       result['end_date'])

    def insert_params(self, industry_text, industry_value, url, begin_date, end_date):
        """
        插入params到数据库中
        """
        data = {
            "industry_text": industry_text,
            "industry_url": industry_value,
            "url": url,
            "page": 1,
            "status": 0,
            "begin_date": begin_date,
            "end_date": end_date,
        }
        try:
            self.quanguo_liaoning_params.insert_one(data)
        except Exception as e:
            self.m.error("插入params到数据库中失败%s" % e)

    def get_menu_producer_consumer(self):
        """
        获取目录页数据
        """
        self.local.insert_data_list = []
        while True:
            result = self.params_queue.get()
            if not result:
                self.m.info("get_data结束")
                try:
                    self.quanguo_liaoning.bulk_write(self.local.insert_data_list)
                except Exception as e:
                    self.m.error("get_data结束后写入缓存数据失败,原因是:%s" % e)
                break
            else:
                self.parse_xunhuan(result)

    def generate_dates_list(self):
        """
        生成从2008年开始到现在每个月的时间
        """
        dates_list = []
        all = []
        for i in range(2008, 2023):
            for j in range(1, 13):
                if i == 2022 and j > datetime.now().month:
                    continue
                if j < 10:
                    j = "0" + str(j)
                dates = (f"{i}-{j}-01")
                dates_list.append(dates)
        dates_list2 = dates_list[1:]
        dates_list2.append(str(date.today()))
        for date1, date2 in zip(dates_list, dates_list2):
            all.append((date1, date2))
        return all

    def generate_url(self, industry_value):
        """
        生成要访问的url
        """
        dates_list = self.generate_dates_list()
        result_list = []
        for dates in dates_list:
            inset_parms = dict()
            inset_parms['begin_date'] = dates[0]
            inset_parms['end_date'] = dates[1]
            inset_parms[
                'url'] = "http://www.lnggzy.gov.cn/lnggzy/showinfo/Morejyxx.aspx?timebegin=%s&timeend=%s&timetype=06&num1=%s&num2=%s&jyly=005&word=" % (
            dates[0], dates[1], industry_value, industry_value + "000")
            result_list.append(inset_parms)
            # url_list.append("http://www.lnggzy.gov.cn/lnggzy/showinfo/Morejyxx.aspx?timebegin=%s&timeend=%s&timetype=06&num1=%s&num2=%s&jyly=005&word=" % (dates[0],dates[1], industry_value, industry_value + "000"))
        return result_list

    def generate_formdata(self, page, __VIEWSTATE, __VIEWSTATEGENERATOR):
        """
        生成post请求的formdata
        """
        data = {
            "__VIEWSTATE": __VIEWSTATE,
            "__VIEWSTATEGENERATOR": __VIEWSTATEGENERATOR,
            "__EVENTTARGET": "MoreInfoListjyxx1$Pager",
            "__EVENTARGUMENT": "%s" % page,
            "__VIEWSTATEENCRYPTED": "",
            "MoreInfoListjyxx1$Pager_input": "%s" % page == (page - 1 if page > 1 else page == 1),
        }
        return data

    def parse_all_page_number(self, url):
        """
        解析所有页码,以及第一页的post请求的参数
        """
        response = self.send_rquest_get(url)
        res = etree.HTML(response)
        temp = "".join(res.xpath("//span[@id='MoreInfoListjyxx1_ys']/text()"))
        all_page_number = int(temp.split("/")[1])
        __VIEWSTATE, __VIEWSTATEGENERATOR = self.get_formdata_info(url)
        if not response:
            raise Exception('获取总页数失败')
        return all_page_number, __VIEWSTATE, __VIEWSTATEGENERATOR

    def get_formdata_info(self, response):
        """
        先访问第一页获取第一页的信息，和请求form_data中的两个参数信息
        """
        res = etree.HTML(response)
        __VIEWSTATE = "".join(res.xpath("//input[@id='__VIEWSTATE']/@value"))
        __VIEWSTATEGENERATOR = "".join(res.xpath("//input[@id='__VIEWSTATEGENERATOR']/@value"))
        return __VIEWSTATE, __VIEWSTATEGENERATOR

    # 今天只能获取昨天的数据 所以应该构造成前一天的零点时间
    def get_lingdian_time(self, time_str):
        time_str_ = str(time_str - timedelta(days=1))[0:10] + " 00:00:00"
        time_ = datetime.strptime(str(time_str_), '%Y-%m-%d %H:%M:%S')
        return time_

    def parse_xunhuan(self, result):
        """
        翻页获取数据
        """
        _id, industry_text, industry_value, url, page = \
            result["_id"], result["industry_text"], result["industry_url"], result["url"], int(result["page"])
        # 获取总页数以及请求第一页的时候需要的参数
        all_page_number, __VIEWSTATE, __VIEWSTATEGENERATOR = self.parse_all_page_number(url)
        try:
            last_begin_time = result['last_begin_time']
        except Exception as err:
            last_begin_time = self.get_lingdian_time(datetime.now())
            self.quanguo_liaoning_params.update_one({'_id': result['_id']},
                                                    {'$set': {'last_begin_time': last_begin_time}})
        # 判断开始日期和当前日期是否是同一个月
        dangyue_flag = False
        if result.get('begin_date', '1900-01') == str(date.today())[0:7]:
            dangyue_flag = True
        while True:
            formdata = self.generate_formdata(page, __VIEWSTATE, __VIEWSTATEGENERATOR)
            response = send_request(method='POST', url=url, headers=self.headers, data=formdata)
            __VIEWSTATE, __VIEWSTATEGENERATOR = self.get_formdata_info(response)
            if not response:
                raise Exception('该页状态码异常')
            if page > all_page_number:
                if dangyue_flag:
                    self.quanguo_liaoning_params.update_one({'_id': _id}, {'$set': {'page': 1}})
                else:
                    self.quanguo_liaoning_params.update_one({'_id': _id}, {'$set': {'status': 2}})
                break
            title_list, originalurl_list, tenderaddress_list, ifbprogresstag_list, publishdate_list, break_point = self.parse_data(
                response, last_begin_time, dangyue_flag)
            self.save_data(title_list, originalurl_list, tenderaddress_list, ifbprogresstag_list, publishdate_list,
                           industry_text, page, _id, url)
            try:
                pass
            except Exception as err:
                break_point = False
                self.m.info("parse_xunhuan  请求数据失败 ：：：失败原因是%s " % err)
            # 更新数据库中的当前页
            self.m.info('%s 的 url为%s 第%s页的数据获取完毕' % (
            industry_text, url.replace("http://www.lnggzy.gov.cn/lnggzy/showinfo/Morejyxx.aspx?", ""), page))
            self.quanguo_liaoning_params.update_one({'_id': _id}, {'$set': {'page': page, 'status': 1}})
            if break_point:
                # 在这把数据库中的时间改成现在的时间
                self.quanguo_liaoning.update_one({'_id': result['_id']},
                                                 {'$set': {'last_begin_time': self.get_lingdian_time(datetime.now())}})
                break
            page = page + 1
        # params表中该日期区间更新完毕状态改成2
        self.quanguo_liaoning_params.update_one({'_id': result['_id']}, {
            '$set': {'page': page, 'status': 2, 'last_begin_time': self.get_lingdian_time(datetime.now())}})

    def parse_data(self, response, last_begin_time, dangyue_flag):
        """
        解析目录页数据
        """
        res = etree.HTML(response)
        div_list = res.xpath("//div[@class='publicont']")
        title_list = []
        originalurl_list = []
        tenderaddress_list = []
        ifbprogresstag_list = []
        publishdate_list = []
        break_ponit = False
        for div in div_list:
            originalurl = "".join(div.xpath("./div/h4/a/@href"))
            if not originalurl:
                continue
            originalurl_list.append("http://www.lnggzy.gov.cn" + originalurl)
            title = "".join(div.xpath("./div/h4/a/@title"))
            if not title:
                continue
            title_list.append(title)
            publishdate = "".join(div.xpath("./div/h4/span/text()")).strip() + ' 00:00:00'
            # 如果dangyue_flag为True表示是当月的数据做增量判断
            if dangyue_flag:
                if datetime.strptime(publishdate, '%Y-%m-%d %H:%M:%S') < last_begin_time:
                    break_ponit = True
            publishdate_list.append(publishdate)
            tenderaddress = "".join(div.xpath("./div/p/span[2]/text()"))
            if not tenderaddress:
                continue
            tenderaddress_list.append(tenderaddress)
            ifbprogresstag = "".join(div.xpath("./div/p/span[6]/text()"))
            if not ifbprogresstag:
                continue
            ifbprogresstag_list.append(ifbprogresstag)
        return title_list, originalurl_list, tenderaddress_list, ifbprogresstag_list, publishdate_list, break_ponit

    def md5_url(self, url):
        """
        md5 url
        """
        m = md5()
        m.update(url.encode())
        return m.hexdigest()

    def insert_data(self, **kwargs):
        """
        批量目录页插入到mongo数据库
        """
        originalurl = kwargs["originalurl"]
        md5_url = self.md5_url(kwargs["originalurl"])
        self.local.insert_data_list.append(UpdateOne({
            "originalurl": originalurl
        },
            {"$set": {
                "industry": kwargs["industry"], "industry_url": kwargs["industry_url"],
                "industryv2": kwargs["industryv2"],
                "ifbprogress": kwargs["ifbprogress"],
                "ifbprogresstag": kwargs["ifbprogresstag"], "channelname": kwargs["channelname"],
                "province": kwargs["province"], "city": kwargs["city"], "title": kwargs["title"],
                "publishdate": kwargs["publishdate"],
                "publishyear": kwargs["publishyear"],
                "projectno": kwargs["projectno"], "sourceplatform": kwargs["sourceplatform"], "agent": kwargs["agent"],
                "plan_number": kwargs["plan_number"], "ifbunit": kwargs["ifbunit"],
                "originalurl": kwargs["originalurl"], "md5_originalurl": md5_url, "SnapShot": kwargs["SnapShot"],
                "tenderaddress": kwargs["tenderaddress"], "createdate": kwargs["createdate"], "ip": kwargs["ip"],
                "executor": kwargs["executor"], "country": kwargs["country"],
                "update_time": kwargs["createdate"], "version_num": 1, "is_parse_html": 0, "page": kwargs["page"],
                "weather_have_iframe": kwargs["weather_have_iframe"], "image_status": kwargs["image_status"],
                "weather_have_image": kwargs["weather_have_image"],
                "weather_have_pdf": kwargs["weather_have_pdf"],
                "weather_have_pdf_type2": kwargs["weather_have_pdf_type2"],
                "url_type": kwargs["url_type"], "original_website_id": kwargs["original_website_id"],
                "weather_have_enclosure": kwargs["weather_have_enclosure"],
                "weather_have_blank_url": kwargs["weather_have_blank_url"]
            }}, upsert=True
        ))
        if len(self.local.insert_data_list) >= 100:
            try:
                self.quanguo_liaoning.bulk_write(self.local.insert_data_list)
            except Exception as e:
                self.m.error("插入失败%s条数据%s" % (len(self.local.insert_data_list), e))
            else:
                self.m.info("插入成功%s条数据" % len(self.local.insert_data_list))
            finally:
                self.local.insert_data_list.clear()

    def create_indexes(self):
        """
        创建索引
        """
        if self.index_status:
            temp_url_index = True
            temp_status_index = True
            industry_status_index = True
            update_time_index = True
            html_id_index = True
            for index in self.quanguo_liaoning.list_indexes():
                if "originalurl" in index["name"]:
                    temp_url_index = False
                    continue
                if "status" in index["name"]:
                    temp_status_index = False
                    continue
                if "industry" in index["name"]:
                    industry_status_index = False
                if "update_time" in index["name"]:
                    update_time_index = False
                if "html_id" in index["name"]:
                    html_id_index = False
            if temp_url_index:
                self.quanguo_liaoning.create_index([("originalurl", 1)], unique=True, background=True)
            if temp_status_index:
                self.quanguo_liaoning.create_index([("status", 1)], background=True)
            if industry_status_index:
                self.quanguo_liaoning.create_index([("industry", 1), ("SnapShot", 1), ("ifbprogress", 1)],
                                                   background=True)
                self.quanguo_liaoning.create_index(
                    [("industry", 1), ("SnapShot", 1), ("ifbprogress", 1), ("image_status", 1)], background=True)
            if update_time_index:
                self.quanguo_liaoning.create_index([("update_time", 1)], background=True)
            if html_id_index:
                self.quanguo_liaoning.create_index([("title", 1), ("ifbprogresstag", 1), ("html_id", 1)],
                                                   background=True)
        self.index_status = False

    def judge_ifbprogress(self, ifbprogresstag):
        """
        判断ifbprogress
        """
        if "成交" in ifbprogresstag or "合同" in ifbprogresstag or "中标" in ifbprogresstag:
            ifbprogress = "中标公告"
        else:
            ifbprogress = "招标公告"
        return ifbprogress

    def generate_tenderaddress(self, tenderaddress):
        """
        生成完整的地址信息
        """
        if tenderaddress == "省本级":
            province = "辽宁省"
            city = ""
            country = ""
            tenderaddress = "辽宁省"
        else:
            province = "辽宁省"
            city = tenderaddress
            country = ""
            tenderaddress = "辽宁省-" + city
        return province, city, country, tenderaddress

    def save_data(self, title_list, originalurl_list, tenderaddress_list, ifbprogresstag_list, publishdate_list,
                  industry_text, page, _id, url):
        """
        存储目录页流程
        """
        for title, originalurl, tenderaddress, ifbprogresstag, publishdate in zip(title_list, originalurl_list,
                                                                                  tenderaddress_list,
                                                                                  ifbprogresstag_list,
                                                                                  publishdate_list):
            ifbprogress = self.judge_ifbprogress(ifbprogresstag)
            province, city, country, tenderaddress = self.generate_tenderaddress(tenderaddress)
            publishyear = publishdate[:4]
            self.insert_data(industry=industry_text, industry_url=url, industryv2="", ifbprogress=ifbprogress,
                             ifbprogresstag=ifbprogresstag,
                             channelname="", province=province, city=city, title=title, TwoLvTitle="",
                             publishdate=publishdate,
                             publishyear=publishyear, projectno="", sourceplatform="全国公共资源交易平台（辽宁省）",
                             originalurl=originalurl, tenderaddress=tenderaddress, html="", createdate=datetime.now(),
                             SnapShot="", ip=self.myself_ip, executor="GU", ifbunit="", agent="",
                             text_xpath="//div[@class='ewb-whitebg']", page=page, weather_have_iframe=0,
                             weather_have_image=0,
                             weather_have_enclosure=1, weather_have_pdf=0, weather_have_pdf_type2=0,
                             url_type=1, original_website_id=70, weather_have_blank_url=0,
                             image_status=0,
                             plan_number="", country=country, id=_id)

    def generate_TwoLvTitle(self, res):
        """
        解析二级标题
        """
        res = etree.HTML(res)
        TwoLvTitle = "".join(res.xpath("//form[@id='form1']/div[4]/text()")) \
            .replace("(", "（").replace(")", "）").replace("[", "【").replace("]", "】")

        return TwoLvTitle

    def judge_xpath_err(self, text_xpath, result):
        """
        判断xpath_err
        """
        if text_xpath:
            industry = result["industry"]
            ifbprogress = result["ifbprogress"]
            if ifbprogress == "中标公告":
                try:
                    SnapShot = result["SnapShot"]
                except:
                    SnapShot = ""
                if SnapShot:
                    image_status = 2
                    xpath_err = 0
                else:
                    image_status = 0
                    xpath_err = 1
            else:
                image_status = 0
                xpath_err = 0
        else:
            xpath_err = 1
            image_status = 0
        return xpath_err, image_status

    def judge_html_(self, html, text_xpath, originalurl):
        """
        判断html是否存在
        """
        html_id = get_all_text(html, text_xpath)
        if html_id:
            result = self.quanguo_liaoning.find_one(
                {"originalurl": originalurl, "html_id": html_id})
            if result:
                return False
            else:
                return html_id

    def get_xpath(self, industry, ifbprogresstag):
        """
        获得具体的xpath
        """

        for k, v in self.text_xpath.items():
            if industry + ifbprogresstag == k:
                text_xpath = "//div[@id='%s']" % v
                break
        else:
            text_xpath = ""
        return text_xpath

    def generate_second_url(self, originalurl, industry):
        """
        生成真正的url
        """
        url = originalurl.replace("InfoDetail", "ZtbInfo")

        if industry == "政府采购":
            url = url.replace("Default", "Zfcg")
        elif industry == "建设工程":
            url = url.replace("Default", "Jscg")
        elif industry == "国有产权":
            url = url.replace("Default", "Cqjy")
        elif industry == "土地/矿产权":
            url = url.replace("Default", "Tdjy")
        elif industry == "药品采购":
            url = url.replace("Default", "Ypcg")
        else:
            url = originalurl
        return url

    def get_detail_consumer(self):
        """
        更新数据
        """
        self.local.update_data_list = []
        while True:
            result = self.detail_queue.get()
            if not result:
                try:
                    self.quanguo_liaoning.bulk_write(self.local.update_data_list)
                except Exception as e:
                    self.m.error("更新失败%s" % e)
                else:
                    self.m.debug("更新成功%s条" % len(self.local.update_data_list))
                self.m.info("update_data结束")
                self.quanguo_liaoning.update_many(
                    {"SnapShot": "",
                     "$in": [{"industry": "工程建设"}, {"industry": "政府采购"}, {"industry": "建设工程"}],
                     "ifbprogress": "中标公告"
                     }, {"$set": {"image_status": 0}})
                break
            else:
                try:
                    detail_id, originalurl, title, ifbprogresstag, industry = result["_id"], result["originalurl"], \
                    result[
                        "title"], result["ifbprogresstag"], result["industry"]
                except Exception as e:
                    self.m.error("部分字段缺失%s" % e)
                    continue
                item = dict()
                item["originalurl_data_from"] = {
                    "url": originalurl,
                    "method": "GET",
                    "request_only_data": "",
                    "response_only_data": ""
                }
                tender_unid = originalurl_data_from(item)
                detail_url = self.generate_second_url(originalurl, industry)
                try:
                    detail_res = self.send_rquest_get(url=detail_url, headers=self.headers)
                except Exception as err:
                    print('请求详情页报错，url是%s' % detail_url)
                    continue
                if detail_res:
                    # 构造html_res,删除其他的div标签信息detail_res
                    html_res = detail_res
                    TwoLvTitle = self.generate_TwoLvTitle(detail_res)
                    text_xpath = self.get_xpath(industry, ifbprogresstag)
                    file_json = get_file_json(text_xpath, detail_res)
                    xpath_err, image_status = self.judge_xpath_err(text_xpath, result)
                    if not text_xpath:
                        self.m.error("xpath不全，请查看%s" % originalurl)
                        continue
                    html_id = self.judge_html_(detail_res, text_xpath + "//text()", originalurl)
                    if html_id:
                        """
                        TwoLvTitle,projectno,SrcMoney,agent,ifbunit,AgentContact,AgentPhoneNum,industryv2,file_json
                        """
                        self.local.update_data_list.append(UpdateOne(
                            {"_id": detail_id},
                            {"$set":
                                 {"html": clear_html(html_res),
                                  "TwoLvTitle": title_strip(TwoLvTitle),
                                  "html_id": html_id,
                                  "project_id": "",
                                  "image_status": 2,
                                  "update_time": datetime.now(),
                                  "text_xpath": text_xpath,
                                  # 截图能上传的时候需要修改
                                  # "xpath_err": xpath_err,
                                  "xpath_err": 0,
                                  "status": 2,
                                  "tender_unid": tender_unid,
                                  "sourceplatform_hosts": "www.ahggzyjt.com",
                                  "originalurl_data_from": {
                                      "url": originalurl,
                                      "method": "get",
                                      "request_only_data": {},
                                      "response_only_data": {}
                                  },
                                  "Bid_data_acquisition_format": "HTML",
                                  "file_json": file_json,
                                  }
                             }))
                    else:
                        self.quanguo_liaoning.update_one({"_id": detail_id}, {"$set": {
                            "status": 2,
                            "image_status": 2,
                            "SnapShot": "正文为空",
                            "xpath_err": 1,
                            "file_json": file_json,
                        }})
                else:
                    print(detail_id)
                    self.quanguo_liaoning.update_one({"_id": detail_id}, {"$set": {
                        "status": 2,
                        "image_status": 2,
                        "SnapShot": "正文为空,标书页请求失败",
                        "xpath_err": 1,
                    }})
                if len(self.local.update_data_list) >= 1:
                    self.m.info('正在更新详情页数据... ...')
                    try:
                        self.quanguo_liaoning.bulk_write(self.local.update_data_list)
                    except Exception as e:
                        self.m.error("更新失败%s,%s" % (len(self.local.update_data_list), e))
                    else:
                        self.m.debug("更新成功%s" % len(self.local.update_data_list))
                    finally:
                        self.local.update_data_list.clear()

    def add_menu_producer_queue(self, flag=False):
        """
        获取列表的生产者
        目录页添加数据到队列的方法
        """
        filter = {'status': {"$ne": 2}}
        update = {'$set': {'status': 0}}
        self.quanguo_liaoning_params.update_many(filter, update)
        filter_ = {'status': 0}
        update_ = {'$set': {'status': 1}}
        proj = {}
        i = 0
        while True:
            try:
                data = self.quanguo_liaoning_params.find_one_and_update(filter_, update_, proj,
                                                                        return_document=ReturnDocument.AFTER)

                if not data:
                    self.m.info("%s:::获取标书目录的生产者结束", datetime.now())
                    break
                self.params_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书目录从库中读取数据err info:%s' % (datetime.now(), err))
                i += 1
                time.sleep(1)
                if i >= 10:
                    break
        for i in range(self.thread_num):
            self.params_queue.put(None)

    def add_detail_to_queue(self, flag=False):
        """
        添加需要更新的数据到队列中
        """
        filter_ = {'status': 1, 'status_time': {'$lt': datetime.now() - timedelta(hours=2)}}
        update_ = {'$set': {'status': 0}}
        self.quanguo_liaoning.update_many(filter_, update_)
        i = 0
        filter_ = {'$or': [{'status': 0}, {'status': {'$exists': False}}]}
        update_ = {'$set': {'status': 1, 'status_time': datetime.now()}}
        proj = {}
        while True:
            try:
                data = self.quanguo_liaoning.find_one_and_update(filter_, update_, proj,
                                                                 return_document=ReturnDocument.AFTER)
                if not data:
                    self.m.info("%s:::获取标书详情的生产者结束", datetime.now())
                    break
                self.detail_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书详情从库中读取数据err info:%s' % (datetime.now(), err))
                i += 1
                time.sleep(3)
                if i >= 10:
                    break
        for i in range(self.thread_num):
            self.detail_queue.put(None)

    def screen_shot_product(self, flag=False):
        """ 截图的生产者线程 """
        self.m.info('%s:::获取截图的生产者线程开启' % datetime.now())
        i = 0
        self.quanguo_liaoning.update_many({"SnapShot": "", "image_status": {"$ne": 2}, "ifbprogress": "中标公告"},
                                          {"$set": {"image_status": 0}})
        while True:
            try:
                one_data = self.quanguo_liaoning.find_one_and_update(
                    {"SnapShot": "", "image_status": 0, "ifbprogress": "中标公告"},
                    {"$set": {"image_status": 1}},
                    {"_id": 1, "originalurl": 1, "text_xpath": 1})
                if not one_data:
                    self.m.info('%s:::截图的生产者线程结束' % datetime.now())
                    break
                self.screen_queue.put(one_data)
                if flag:
                    break
            except Exception as err:
                if i >= 5:
                    self.m.error('%s:::截图的生产者异常结束 err info: %s' % (datetime.now(), err))
                    break
                i += 1
                time.sleep(3)
        for i in range(self.thread_num):
            self.screen_queue.put(None)

    def screen_shot_consumer(self):
        """
        上传图片
        """
        while True:
            if not self.screen_queue.qsize():
                self.m.info('%s:::截图的队列为空休息5s' % datetime.now())
                time.sleep(5)
                continue
            result = self.screen_queue.get()
            if not result:
                self.m.info("screen_shot_consumer结束")
                break
            self.m.info('%s:::url : %s 开始截图' % (datetime.now(), result["originalurl"]))
            upload_image_delete_pic2(result["originalurl"], self.quanguo_liaoning, result['_id'])

    # 更新目录表
    def chushihua_sql(self):
        # 初始化目录表
        self.get_params()
        # 按照end_date进行排序并找出来最后四个 总共就四个分类 所有就是找出来全部分类  这是需要每天变换url进行增量的
        params_result = self.quanguo_liaoning_params.find().sort('end_date', -1).limit(4)
        today = str(date.today())
        for p_result in params_result:
            if today[0:7] == p_result['end_date'][0:7]:
                url = str(p_result['url']).replace(p_result['end_date'], str(today))
                self.quanguo_liaoning_params.update_one(
                    {'_id': p_result['_id']},
                    {'$set': {
                        'end_date': today,
                        "url": url,
                        'status': 0,
                    }}
                )
            else:
                url_begin_date = p_result['begin_date'][0:6] + str(int(p_result['begin_date'][6:7]) + 1) + '-01'
                url = str(p_result['url']).replace(p_result['begin_date'], url_begin_date).replace(
                    p_result['begin_date'], today)
                self.quanguo_liaoning_params.insert_one(
                    {
                        "_id": ObjectId(),
                        "industry_text": p_result['industry_text'],
                        "industry_url": p_result['industry_url'],
                        "url": url,
                        "page": 1,
                        "status": 0,
                        "begin_date": url_begin_date,
                        "end_date": today
                    }
                )

    def run_thread_list(self):

        self.thread_name_list = [
            self.add_menu_producer_queue,  # 获取列表的生产者
            self.add_detail_to_queue,  # 获取详情的生产者
            # self.screen_shot_product  #获取截图的生产者
            self.get_menu_producer_consumer,  # 获取列表的消费者

        ]
        self.more_thread_name_list = [
            self.get_detail_consumer,  # 获取详情的消费者
            # self.screen_shot_consumer         #获取截图的消费者
        ]

    def run_test(self):
        import pdb
        pdb.set_trace()
        self.chushihua_sql()  # 所有的筛选种类入库
        # self.add_page_list_product(True)  # 获取列表的生产者
        # self.get_page_list_consumer() # 获取列表的消费者

        # self.add_detail_to_queue(True)  # 获取详情的生产者
        # self.get_detail_consumer()  # 获取详情的消费者

        # self.screen_shot_product(True)  # 获取截图的生产者
        # self.screen_shot_consumer()  # 获取截图的消费者


if __name__ == '__main__':
    # db_name = 'test_gu'
    # Key into the DATABASE config dict; selects the target MongoDB instance.
    target_db = 'lizhiheng_db2'
    spider = LiaoNing(target_db)
    # spider.chushihua_sql()
    spider.run()
