#!/usr/bin/python3.8
# -*- coding:UTF-8 -*-

import json
import random
import re
from datetime import datetime, timedelta
import logging
import os
import sys
import threading
import time
from hashlib import md5
from queue import Queue

BASH_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASH_DIR)
from lxml import etree, html
from pymongo import UpdateOne, ReturnDocument

from base_spider import BaseSpider
from conf.conf_util import title_strip, upload_image_delete_pic2, send_request, \
    sha256_all_text, clear_html, parse_xpath, judge_xpath
from conf.database import DATABASE
from conf.dber import MongoDBer
from conf.logging_debug import MyLogging


# Batch-update spider (bulk upserts) for the Jinan public resource trading center site
class Jinanggzy(BaseSpider):
    def __init__(self, db_name):
        """
        Set up HTTP headers, work queues, MongoDB collections, logging and
        collection indexes for the Jinan public-resource-trading spider.

        :param db_name: key into the DATABASE config dict.
            NOTE(review): currently unused — the connection below is
            hard-coded to DATABASE['bendi']; confirm which is intended.
        """
        super(Jinanggzy, self).__init__()
        self.headers = {
            # "Cookie": "JSESSIONID=325F32A63F535B0ECC371B5881FAAE44; jfe_pin=edb5223c; jfe_ts=1667356652.668; jfe_sn=cj4oWRHJrJdcLhbDnSWPd5qP5hU=",
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/103.0.0.0 Safari/537.36",
        }
        self.params_queue = Queue(maxsize=10)  # filter-parameter queue (list-page crawl tasks)
        self.detail_queue = Queue(maxsize=100)  # detail-record queue (detail-page crawl tasks)
        self.index_status = True  # whether collection indexes still need to be created
        self.screen_queue = Queue(maxsize=100)  # url queue, used for taking screenshots
        self.local = threading.local()  # per-thread bulk-write buffers
        # self.db_m = MongoDBer(DATABASE[db_name])  # mongodb 库连接对象
        self.db_m1 = MongoDBer(DATABASE['bendi'])  # MongoDB connection object
        self.Jinanggzy_param = self.db_m1["jinanggzy_param"]  # list-page filter parameters
        self.Jinanggzy_param_err = self.db_m1["jinanggzy_param_err"]  # failed list-page urls
        self.Jinanggzy_data = self.db_m1["jinanggzy_data"]  # crawled records
        self.xpaths = ['//div[@class="list"]/table', ]  # candidate xpaths for the detail-page body
        # logging
        self.m = MyLogging(self.__class__.__name__, debug_level=logging.INFO, write_level=logging.INFO).logging
        self.create_indexes()  # create collection indexes
        self.myself_ip = self.get_myself_ip()  # obtain this machine's ip

    def get_params(self):
        """
        Seed the Jinanggzy_param collection with one filter-parameter
        document per (industry, announcement-type) combination.

        Each list value is positional:
            [0] list-page API url
            [1] xuanxiang — the site's own tab/category name sent in POST data
            [2] _type    — the site's numeric industry code (string)
            [3] ifbprogress — our normalized progress category
            [4] industry    — our industry label
            [5] ifbprogresstag — our sub-category tag
        A document is inserted only if no identical one already exists,
        so re-running this is idempotent (page/status are not part of the
        existence check and keep their stored values).
        """
        param_dict = {
            "建设工程": {
                "招标计划": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "招标公告", "0",
                             "招标公告", "建设工程", '招标计划'],
                "中标候选人公示": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "预中标公示", '0',
                                   "中标公告", "建设工程", '中标候选人公示'],
                "中标结果公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "中标公告", '0',
                                 "中标公告", "建设工程", '中标结果公告'],
                "资格预审公示": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "资格预审公示", '0',
                                 "招标公告", "建设工程", '资格预审公示'],
                "废标公示": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "废标公告", '0',
                             "招标公告", "建设工程", '废标公示'],
            },
            "政府采购": {
                "招标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "招标公告", '1',
                             "招标公告", "政府采购", '招标公告'],
                "中标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "中标公告", '1',
                             "中标公告", "政府采购", '中标公告'],
                "变更公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "变更公告", '1',
                             "招标公告", "政府采购", '变更公告'],
                "废标公示": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "废标公告", '1',
                             "招标公告", "政府采购", '废标公示'],
            },
            "土地矿产": {
                "招标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "招标公告", "2",
                             "招标公告", "土地矿产", '招标公告'],
                "结果公示": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "中标公告", "2",
                             "中标公告", "土地矿产", '结果公示'],
            },
            "产权交易": {
                "招标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "招标公告", "3",
                             "招标公告", "产权交易", '招标公告'],
                "中标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "中标公告", "3",
                             "中标公告", "产权交易", '中标公告'],
                "变更公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "变更公告", "3",
                             "招标公告", "产权交易", '变更公告'],
            },
            "水利工程": {
                "招标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "招标公告", "4",
                             "招标公告", "水利工程", '招标公告'],
                "预中标公示": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "预中标公示", "4",
                               "中标公告", "水利工程", '预中标公示'],
                "中标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "中标公告", "4",
                             "中标公告", "水利工程", '中标公告'],
                "变更公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "变更公告", "4",
                             "招标公告", "水利工程", '变更公告'],
            },
            "铁路工程": {
                "招标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "招标公告", "5",
                             "招标公告", "铁路工程", '招标公告'],
                "预中标公示": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "预中标公示", "5",
                               "中标公告", "铁路工程", '预中标公示'],
                "中标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "中标公告", "5",
                             "中标公告", "铁路工程", '中标公告'],
                "变更公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "变更公告", "5",
                             "招标公告", "铁路工程", '变更公告'],
            },
            "交通工程": {
                "招标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "招标公告", "6",
                             "招标公告", "交通工程", '招标公告'],
                "预中标公示": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "预中标公示", "6",
                               "中标公告", "交通工程", '预中标公示'],
                "中标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "中标公告", "6",
                             "中标公告", "交通工程", '中标公告'],
                "变更公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "变更公告", "6",
                             "招标公告", "交通工程", '变更公告'],
            },
            "其他项目": {
                "招标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "招标公告", "7",
                             "招标公告", "其他项目", '招标公告'],
                "预中标公示": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "预中标公示", "7",
                               "中标公告", "其他项目", '预中标公示'],
                "中标公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "中标公告", "7",
                             "中标公告", "其他项目", '中标公告'],
                "变更公告": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "变更公告", "7",
                             "招标公告", "其他项目", '变更公告'],
                # NOTE(review): tag below is '废标公告' while every other section
                # uses '废标公示' for this key — confirm whether this is intentional.
                "废标公示": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/search.do", "废标公告", "7",
                             "招标公告", "其他项目", '废标公告'],
            },
            # financial-asset listings use a different endpoint (queryList.do)
            # and here index [2] is a page "type" code, not an industry code
            "金融资产交易": {
                "资产招商": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/assets/queryList.do", "招标公告", "1",
                             "招标公告", "金融资产交易", '资产招商'],
                "资产挂牌": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/assets/queryList.do", "预中标公示", "3",
                             "招标公告", "金融资产交易", '资产挂牌'],
                "成交信息": ["http://jnggzy.jinan.gov.cn/jnggzyztb/front/assets/queryList.do", "中标公告", "2",
                             "中标公告", "金融资产交易", '成交信息'],
            }
        }
        for industry in param_dict:
            for ifbprogress in param_dict[industry]:
                # full document to insert (with crawl-progress defaults)
                data_dict = {
                    "industry": param_dict[industry][ifbprogress][4],
                    "ifbprogresstag": param_dict[industry][ifbprogress][5],
                    "ifbprogress": param_dict[industry][ifbprogress][3],
                    "link": param_dict[industry][ifbprogress][0],
                    "xuanxiang": param_dict[industry][ifbprogress][1],
                    "_type": param_dict[industry][ifbprogress][2],
                    "page": 1,
                    "status": 0
                }
                # existence check deliberately omits page/status so re-runs
                # don't reset crawl progress
                sql_res = self.Jinanggzy_param.find_one({
                    "industry": param_dict[industry][ifbprogress][4],
                    "ifbprogresstag": param_dict[industry][ifbprogress][5],
                    "ifbprogress": param_dict[industry][ifbprogress][3],
                    "link": param_dict[industry][ifbprogress][0],
                    "xuanxiang": param_dict[industry][ifbprogress][1],
                    "_type": param_dict[industry][ifbprogress][2],
                })
                if not sql_res:
                    self.Jinanggzy_param.insert_one(data_dict)

    def get_all_number_page(self, param_res, param_result):
        """
        Extract the total page count from a list-page JSON response.

        :param param_res: raw JSON response body.
        :param param_result: filter-param document (unused here; kept for
            signature compatibility with callers).
        :return: total number of pages as int; 1 when the response cannot
            be parsed or lacks the expected fields.
        """
        try:
            payload = json.loads(param_res)
            # the API wraps the page count in a "params" envelope
            return int(payload["params"]["pagesum"])
        except (ValueError, KeyError, TypeError):
            # malformed JSON / missing key / non-numeric count: fall back to
            # a single page instead of crashing the crawl (was a bare except)
            return 1

    def parse_data(self, param_res, param_result):
        """
        Parse one list-page JSON response into parallel field lists.

        Fix: each record is now appended to all four lists atomically. The
        previous version appended field-by-field, so a mid-record parse
        failure (or the "市本级" county skip) left the lists different
        lengths and save_data's zip() paired later titles with the wrong
        url/date/county.

        :param param_res: raw JSON body; the list markup is an HTML fragment
            under params.str.
        :param param_result: filter-param document; 'xuanxiang' selects the
            link-extraction strategy, 'day_flag' enables incremental mode.
        :return: (title_list, originalurl_list, publishdate_list,
                  day_end_flag, county_list)
        """
        day_end_flag = False  # set when incremental crawl reaches day-old data
        title_list = []
        county_list = []
        originalurl_list = []
        publishdate_list = []
        payload = json.loads(param_res)
        # the list page is an HTML fragment embedded in the JSON response
        fragment = etree.HTML(payload['params']["str"])
        for item in fragment.xpath('//ul[@class="list"]/li'):
            # --- detail-page url ---
            if param_result['xuanxiang'] == "招标公告" or param_result['xuanxiang'] == '资格预审公示':
                # url parts are encoded in onclick="showview(id, type, tab)"
                try:
                    onclick = item.xpath('./a/@onclick')[0]
                    cleaned = onclick.replace('showview(', '').replace(')"', '') \
                        .replace("'", '').replace(")", '')
                    originalurl_, _type, xuanxiang = cleaned.split(',')[:3]
                except Exception as e:
                    print(e)
                    continue
                if originalurl_ != '':
                    originalurl = f'http://jnggzy.jinan.gov.cn/jnggzyztb/front/showNotice.do?iid={originalurl_}&xuanxiang={xuanxiang}&isnew={_type}'
                else:
                    originalurl = ''
            else:
                # other categories link directly via href
                try:
                    href = item.xpath('./a/@href')[0]
                except Exception as e:
                    print(e)
                    continue
                originalurl = f'http://jnggzy.jinan.gov.cn{href}'
                print(originalurl)
            # --- title ---
            try:
                title = item.xpath('./a/@title')[0].strip()
            except Exception as e:
                print(e)
                continue
            print(title)
            # --- county ---
            try:
                county = item.xpath('./span[1]/text()')[0]
            except Exception as e:
                print(e)
                continue
            county = county.strip().replace('[', '').replace(']', '')
            if '市本级' in county:
                # city-level records are excluded — drop the whole record
                print("市区去除：" + county)
                continue
            print(county)
            # --- publish date ---
            try:
                publishdate_ = item.xpath('./span[2]/text()')[0]
            except Exception as e:
                print(e)
                continue
            if publishdate_ != "":
                date_str = publishdate_.strip().replace('年', '-').replace('月', '-').replace('日', '')
            else:
                date_str = '1970-01-01'  # sentinel for "no date on the page"
            publishdate = datetime.strptime(date_str, "%Y-%m-%d")
            # incremental mode: stop as soon as we reach data older than a day
            if param_result.get('day_flag'):
                if publishdate < datetime.now() - timedelta(days=1):
                    day_end_flag = True
                    break
            if date_str == '1970-01-01':
                publishdate = ''
            # append the whole record atomically so the lists stay aligned
            title_list.append(title)
            county_list.append(county)
            originalurl_list.append(originalurl)
            publishdate_list.append(publishdate)
        return title_list, originalurl_list, publishdate_list, day_end_flag, county_list

    def remove_js_style(self, response):
        """
        Strip <script>/<noscript> and <style>/<nostyle> nodes from an HTML
        document and return the cleaned markup.

        :param response: raw HTML text.
        :return: cleaned HTML as a utf-8 str, or False on any parse/serialize
            failure.
        """
        charset = 'utf-8'
        try:
            tree = etree.HTML(response)
            # remove every script/style-like node in a single combined pass
            for node in tree.xpath("//script | //noscript | //style | //nostyle"):
                node.getparent().remove(node)
            return html.tostring(tree, encoding=charset).decode(charset)
        except Exception as err:
            print('remove_js_style报错是%s ' % err)
            return False

    def create_indexes(self):
        """
        Ensure the indexes on the data collection exist (runs once per
        process, guarded by self.index_status).

        Existing indexes are detected by substring-matching their names, and
        create_index is only called for those not found. Fix: removed the
        unused local flag html_id_index the original declared but never read.

        NOTE(review): the name matching is fuzzy — e.g. the compound
        industry index contains "status" in its name, so on later runs it
        hits the "status" branch's continue before the "industry" check.
        MongoDB's create_index is idempotent, so a false "missing" only
        costs a no-op call.
        """
        if not self.index_status:
            return
        need_url_index = True
        need_status_index = True
        need_industry_index = True
        need_update_time_index = True
        need_html_index = True
        for index in self.Jinanggzy_data.list_indexes():
            if "originalurl" in index["name"]:
                need_url_index = False
                continue
            if "status" in index["name"]:
                need_status_index = False
                continue
            if "industry" in index["name"]:
                need_industry_index = False
            if "update_time" in index["name"]:
                need_update_time_index = False
            if "html" in index["name"]:
                need_html_index = False
        if need_url_index:
            # unique pair: one document per (url, content-hash) version
            self.Jinanggzy_data.create_index([("originalurl", 1), ("html_id", 1)], unique=True, background=True)
        if need_status_index:
            self.Jinanggzy_data.create_index([("status", 1)], background=True)
        if need_industry_index:
            self.Jinanggzy_data.create_index([("SnapShot", 1), ("ifbprogress", 1)],
                                             background=True)
            self.Jinanggzy_data.create_index(
                [("industry", 1), ("SnapShot", 1), ("ifbprogress", 1), ("image_status", 1)], background=True)
        if need_update_time_index:
            self.Jinanggzy_data.create_index([("update_time", 1)], background=True)
        if need_html_index:
            self.Jinanggzy_data.create_index([("html", 1)], background=True)
        self.index_status = False

    def md5_url(self, url):
        """Return the hex MD5 digest of *url* (used as a stable url key)."""
        return md5(url.encode()).hexdigest()

    def save_data(self, title_list, originalurl_list, publishdate_list, page, param_result, county_list):
        """
        Queue one upsert per parsed list-page record.

        Walks the parallel field lists in lockstep (zip truncates to the
        shortest) and hands each record to insert_data together with this
        site's constants (province/city, source platform, website id 75).
        """
        records = zip(title_list, originalurl_list, publishdate_list, county_list)
        for title, originalurl, publishdate, county_name in records:
            # keyword form of the 25-slot positional call — same values,
            # but each one is now labelled with its parameter name
            self.insert_data(
                industry=param_result['industry'],
                industryv2="",
                ifbprogress=param_result['ifbprogress'],
                ifbprogresstag=param_result['ifbprogresstag'],
                zhaocai_type='',
                channelname='',
                province='山东省',
                city='济南市',
                county=county_name,
                title=title,
                ifbunit='',
                agent='',
                result_list='',
                publishdate=publishdate,
                publishyear=str(publishdate)[:4],
                projectno='',
                sourceplatform='济南市公共资源交易中心',
                originalurl=originalurl,
                tenderaddress='',
                ctime=datetime.now(),
                SnapShot="",
                ip=self.myself_ip,
                executor="Jjd",
                text_xpath="",
                page=page,
                weather_have_iframe=0,
                weather_have_image=0, weather_have_pdf=0, weather_have_pdf_type2=0, url_type='html',
                original_website_id=75, weather_have_blank_url=0, weather_have_enclosure=0,
                uuid="", image_status=0)

    def insert_data(self, industry, industryv2, ifbprogress, ifbprogresstag, zhaocai_type, channelname, province, city,
                    county, title,
                    ifbunit, agent, result_list,
                    publishdate, publishyear, projectno, sourceplatform, originalurl, tenderaddress,
                    ctime, SnapShot, ip, executor, text_xpath, page, weather_have_iframe,
                    weather_have_image, weather_have_pdf, weather_have_pdf_type2, url_type,
                    original_website_id, weather_have_blank_url, weather_have_enclosure,
                    uuid, image_status):
        """
        Buffer an upsert for one crawled record (keyed on originalurl) and
        flush the per-thread buffer to MongoDB in batches of 100.

        The $set mirrors the parameters, plus md5_originalurl (md5 of the
        url), utime (= ctime) and version_num=1 on every write.

        NOTE(review): text_xpath and image_status are accepted but not
        written by this $set — presumably filled in later by the detail
        consumer; confirm that is intentional.

        The buffer lives on self.local (threading.local), so each worker
        thread batches independently; callers must have initialised
        self.local.insert_data_list first.
        """
        md5_url = self.md5_url(originalurl)
        self.local.insert_data_list.append(UpdateOne(
            {"originalurl": originalurl}, {
                "$set": {
                    "industry": industry, "industryv2": industryv2, "ifbprogress": ifbprogress,
                    "ifbprogresstag": ifbprogresstag, "channelname": channelname,
                    "province": province, "city": city, "county": county, "title": title_strip(title),
                    "publishdate": publishdate, "publishyear": publishyear, "result_list": result_list,
                    "projectno": projectno, "sourceplatform": sourceplatform,
                    "originalurl": originalurl, "md5_originalurl": md5_url, "tenderaddress": tenderaddress,
                    "ctime": ctime, "SnapShot": SnapShot, "ip": ip, "executor": executor,
                    "utime": ctime, "version_num": 1, "agent": agent, "zhaocai_type": zhaocai_type,
                    "ifbunit": ifbunit, "page": page,
                    "weather_have_iframe": weather_have_iframe, "weather_have_image": weather_have_image,
                    "weather_have_pdf": weather_have_pdf, "weather_have_pdf_type2": weather_have_pdf_type2,
                    "url_type": url_type, "original_website_id": original_website_id,
                    "weather_have_enclosure": weather_have_enclosure,
                    "weather_have_blank_url": weather_have_blank_url, "uuid": uuid,
                }
            }, upsert=True
        ))

        # flush in batches of 100; clear the buffer whether or not the
        # bulk_write succeeded (failed batches are logged, not retried)
        if len(self.local.insert_data_list) >= 100:
            try:
                self.Jinanggzy_data.bulk_write(self.local.insert_data_list)
            except Exception as e:
                self.m.error("插入失败%s条数据" % len(self.local.insert_data_list), e)
            else:
                self.m.info("插入成功%s条数据" % len(self.local.insert_data_list))
            finally:
                self.local.insert_data_list.clear()

    def save_param_err(self, param_url, parma_result):
        """
        Record a failed list-page url for later retry.

        Inserts a document into Jinanggzy_param_err unless an identical one
        (same category fields + url, status 0) already exists.
        """
        err_doc = {
            "ifbprogress": parma_result['ifbprogress'],
            "ifbprogresstag": parma_result['ifbprogresstag'],
            "industry": parma_result['industry'],
            "channelname": parma_result['channelname'],
            "status": 0,
            "link": param_url,
        }
        # the document doubles as the duplicate-check query
        if not self.Jinanggzy_param_err.find_one(err_doc):
            self.Jinanggzy_param_err.insert_one(err_doc)

    def parse_xunhuan(self, param_result):
        """
        Page through one filter category's list pages, saving records as it
        goes and persisting crawl progress back to Jinanggzy_param.

        Two modes:
        - full crawl (day_flag unset): resumes from the stored page, or
          restarts from page 1 when the last full pass is over 2 days old;
        - incremental (day_flag set): starts at page 1 and stops as soon as
          parse_data reports data older than one day.

        :param param_result: one document from Jinanggzy_param.
        """
        # full-crawl mode: decide whether to resume or restart from page 1
        if not param_result.get('day_flag'):
            if (datetime.now() - param_result.get('all_start_time', datetime.now() - timedelta(days=2))).days > 2:
                self.Jinanggzy_param.update_one({'_id': param_result['_id']},
                                                {'$set': {'all_start_time': datetime.now()}})
                page = 1
            else:
                page = param_result.get('page', 1)
        else:
            page = 1
        while True:
            if page == 1:
                param_url = param_result['link']
            else:
                # NOTE(review): 'moreinfo' does not appear in the search.do
                # links seeded by get_params, so this replace is a no-op and
                # the url is the same for every page — pagination actually
                # happens via the POST body below. Confirm intentional.
                param_url = param_result['link'].replace('moreinfo', str(page))
            print(param_url)
            # the financial-assets endpoint uses a different POST payload
            if param_result['industry'] == '金融资产交易':
                data = {
                    "index": page,
                    "pageSize": "15",
                    "type": param_result['_type'],
                }
            else:
                data = {
                    "area": "",
                    "type": param_result['_type'],
                    "xuanxiang": param_result['xuanxiang'],
                    "subheading": "",
                    "pagenum": page
                }
            # data = json.dumps(data, separators=(',', ':'))
            try:
                param_res = self.send_rquest_post(url=param_url, data=data, headers=self.headers)
            except Exception as err:
                time.sleep(random.randint(10, 15))
                # on failure: back off and retry the same page
                # (optionally record the failed link in the error table)
                # self.save_param_err(param_url, param_result)
                continue
            all_number_page = self.get_all_number_page(param_res, param_result)
            print('总页数是%s' % all_number_page)
            # stop condition: past the last page
            # NOTE(review): `page > all_number_page - 1` breaks before
            # processing page == all_number_page — confirm the last page is
            # deliberately skipped.
            if page > int(all_number_page) - 1:
                self.Jinanggzy_param.update_one({'_id': param_result['_id']},
                                                {'$set': {'page': page, 'day_flag': True,
                                                          'all_start_time': datetime.now()}})
                break
            if param_res:
                title_list, originalurl_list, publishdate_list, day_end_flag, county_list = self.parse_data(
                    param_res, param_result)

                self.save_data(title_list, originalurl_list, publishdate_list, page, param_result, county_list)
                if day_end_flag:
                    # incremental crawl caught up: reset to page 1 and stamp
                    # the pass time in the database
                    self.Jinanggzy_param.update_one({'_id': param_result['_id']},
                                                    {'$set': {'day_flag': True, 'page': 1,
                                                              'all_start_time': datetime.now()}})
                    self.m.info(
                        '%s 的%s的第%s页增量成功' % (param_result['industry'], param_result['ifbprogresstag'], page))
                    break
                self.m.info(
                    '%s 的%s的第%s页获取数据成功' % (param_result['industry'], param_result['ifbprogresstag'], page))
            else:
                print('没有数据')
            # persist the current page so an interrupted crawl can resume
            self.Jinanggzy_param.update_one({'_id': param_result['_id']}, {'$set': {'page': page, 'status': 1}})
            page = page + 1
            time.sleep(2)
        # category finished: mark it done in the database
        self.Jinanggzy_param.update_one({'_id': param_result['_id']}, {'$set': {'status': 2, }})
        self.m.info('%s 的%s获取数据完毕' % (param_result['industry'], param_result['ifbprogresstag']))

    def get_menu_producer_consumer(self):
        """
        Consumer loop for list-page crawling: pull filter-param documents
        off params_queue and crawl each one via parse_xunhuan. A falsy
        sentinel ends the loop, flushing any buffered bulk-write operations
        first.
        """
        self.local.insert_data_list = []  # per-thread bulk-write buffer
        while True:
            param_result = self.params_queue.get()
            if param_result:
                self.parse_xunhuan(param_result)
                continue
            # sentinel received: flush whatever remains in the buffer
            try:
                self.Jinanggzy_data.bulk_write(self.local.insert_data_list)
            except Exception as err:
                self.m.error("get_data结束后写入缓存数据失败,原因是:%s" % err)
            break

    def judge_xpath_err(self, detail_result):
        """
        Decide the screenshot / xpath-error flags for a detail record.

        Only award-notice records ("中标公告") in the construction or
        government-procurement industries get image_status=2 (snapshot
        already present) — and only when the record carries a truthy
        SnapShot value. Everything else gets image_status=0.

        :param detail_result: document from Jinanggzy_data; SnapShot may be
            absent.
        :return: (image_status, xpath_err); xpath_err is always 0 here and
            re-evaluated later against the parsed detail page.
        """
        award_industries = ("工程建设", "建设工程", "政府采购")
        # .get replaces the original bare try/except around a missing key
        if (detail_result['industry'] in award_industries
                and detail_result['ifbprogress'] == "中标公告"
                and detail_result.get("SnapShot")):
            return 2, 0
        return 0, 0

    def get_all_text(self, res, xpath):
        """
        Fingerprint the Chinese text content found under *xpath* in *res*.

        :param res: detail-page HTML.
        :param xpath: xpath selecting the text nodes of the content area.
        :return: sha256 id of the extracted CJK characters, or "" when 50 or
            fewer such characters were found (too little to fingerprint).
        """
        cjk_only = re.compile("[\u4e00-\u9fa5]")
        tree = etree.HTML(res)
        raw_text = "".join(tree.xpath(xpath))
        chinese = "".join(cjk_only.findall(raw_text))
        if len(chinese) > 50:
            return sha256_all_text(chinese)
        return ""

    def get_all_text1(self, res, xpath):
        """
        Like get_all_text, but fingerprints CJK plus ASCII alphanumerics.

        :param res: detail-page HTML.
        :param xpath: xpath selecting the text nodes of the content area.
        :return: sha256 id of the filtered text, or "" when 50 or fewer
            characters remain after filtering.
        """
        keep_chars = re.compile("[\u4e00-\u9fa5a-zA-Z0-9]")
        tree = etree.HTML(res)
        raw_text = "".join(tree.xpath(xpath))
        filtered = "".join(keep_chars.findall(raw_text))
        return sha256_all_text(filtered) if len(filtered) > 50 else ""

    def judge_xpath(self, res, xpaths):
        """
        Return the first xpath in *xpaths* that matches something in *res*,
        or "" when none match.
        """
        return next((xp for xp in xpaths if parse_xpath(res, xp)), "")

    def get_file_json(self, text_xpath, html_):
        """
        Collect attachment links under the detail-page content node.

        Gathers <a href> and <button code> urls (plus their link texts)
        inside *text_xpath* and keeps those that look like document files.
        Fix: the extension filter is now a single tuple (the original
        repeated the ".zip" membership test twice for both url and name).

        :param text_xpath: xpath of the content node.
        :param html_: detail-page HTML.
        :return: {"files": [{"file_name", "file_url", "file_type"}, ...]}
            or "" when no attachments were found.
        """
        ext_rule = re.compile(r"\.([^.]*)$")  # everything after the last dot
        doc_exts = (".pdf", ".zip", ".doc", ".rar", ".xlsx")
        tree = etree.HTML(html_)
        file_url_list = tree.xpath(text_xpath + "//a/@href") + tree.xpath(text_xpath + "//button/@code")
        file_name_list = tree.xpath(text_xpath + "//a//text()") + tree.xpath(text_xpath + "//button//text()")
        file_url_real_list = []
        file_name_real_list = []
        # NOTE(review): zip pairs urls with link texts positionally; an <a>
        # without href (or a button without code) would shift the pairing —
        # confirm against real pages.
        for file_url, file_name in zip(file_url_list, file_name_list):
            if any(ext in file_url for ext in doc_exts) or any(ext in file_name for ext in doc_exts):
                # NOTE(review): this host differs from the crawled site
                # (jnggzy.jinan.gov.cn) — looks copy-pasted from another
                # spider; confirm attachments really live on this domain.
                file_url_real_list.append('http://jczx.wjbfwzx.com:80' + file_url)
                file_name_real_list.append(file_name)
        file_type = ["".join(ext_rule.findall(u)) for u in file_url_real_list]
        file_json = {"files": [
            {"file_name": n, "file_url": u, "file_type": t}
            for n, u, t in zip(file_name_real_list, file_url_real_list, file_type)
            if n and t and u
        ]}
        # empty result collapses to "" (callers store "" for "no files")
        return file_json if file_json["files"] else ""

    def get_detail_consumer(self):
        """
        Consumer loop for detail pages: pull records off detail_queue, fetch
        each detail page, and buffer updates of the html / html_id /
        TwoLvTitle / utime fields. A falsy sentinel ends the loop after a
        final buffer flush.

        Per record:
        - fetch the page; on empty response mark status=2 with an error text;
        - find the content node via the class's candidate xpaths;
        - hash the content text (html_id); if another document already has
          this (url, html_id) pair, mark the record as a duplicate;
        - otherwise buffer an UpdateOne with the cleaned html, attachment
          json and bookkeeping fields (flushed every iteration since the
          batch threshold is 1).
        """
        self.local.get_detail_consumer_list = []  # per-thread update buffer
        while True:
            detail_result = self.detail_queue.get()
            time.sleep(2)  # throttle requests against the site
            if not detail_result:
                # sentinel: flush remaining buffered updates and stop
                try:
                    self.Jinanggzy_data.bulk_write(self.local.get_detail_consumer_list)
                    self.m.info("get_detail_consumer结束")
                except Exception as e:
                    self.m.error("更新失败%s" % e)
                else:
                    self.m.debug("更新成功%s条" % len(self.local.get_detail_consumer_list))
                break
            else:
                print('开始更新    ' + detail_result['originalurl'])
                url = detail_result['originalurl']
                try:
                    detail_res_all = self.send_rquest_get(url=url, headers=self.headers)
                except:
                    # fetch failed: leave the record for a later pass
                    continue
                if detail_res_all:
                    # module-level judge_xpath helper (not self.judge_xpath)
                    try:
                        xpath = judge_xpath(detail_res_all, self.xpaths)
                    except:
                        xpath = ''
                    if xpath != '':
                        image_status, xpath_err = self.judge_xpath_err(detail_result)
                    else:
                        # no known content xpath matched: skip this record
                        continue
                        # raise Exception('xpath_err url是%s' % detail_result['originalurl'])
                    TwoLvTitle = detail_result['title']
                    # content fingerprint over the CJK text of the body
                    html_id = self.get_all_text(detail_res_all, xpath + "//text()")
                    print('html_id是' + html_id)
                    if html_id:
                        try:
                            if detail_result['title'] == '' or html_id == '':
                                xpath_err = 1
                            else:
                                xpath_err = 0
                        except:
                            xpath_err = 1
                        # check whether this (url, content-hash) pair already exists
                        detail_res = self.Jinanggzy_data.find_one(
                            {"originalurl": detail_result['originalurl'], "html_id": html_id})
                        if not detail_res:
                            file_json = self.get_file_json(xpath, detail_res_all)
                            self.local.get_detail_consumer_list.append(UpdateOne(
                                {"_id": detail_result["_id"]},
                                {"$set": {
                                    "originalurl": detail_result['originalurl'],
                                    "html": clear_html(detail_res_all),
                                    "html_id": html_id,
                                    "image_status": image_status,
                                    "TwoLvTitle": title_strip(TwoLvTitle),
                                    "utime": datetime.now(),
                                    "text_xpath": xpath,
                                    "xpath_err": xpath_err,
                                    "status": 2,
                                    "originalurl_data_from": {
                                        "url": detail_result['originalurl'],
                                        "method": "get",
                                        "request_only_data": {},
                                        "response_only_data": {}
                                    },
                                    "file_json": file_json,
                                    "Bid_data_acquisition_format": "HTML",
                                }}
                            ))
                        else:
                            # duplicate content: close the record without a screenshot
                            self.Jinanggzy_data.update_one({"_id": detail_result["_id"]
                                                            }, {"$set": {
                                "status": 2,
                                "image_status": 2,
                                "err_txt": "该数据为重复数据，无需上传截图",
                                "xpath_err": 1,
                            }})


                else:
                    # empty response body: close the record with an error text
                    self.Jinanggzy_data.update_one({"_id": detail_result["_id"]}, {"$set": {
                        "status": 2,
                        "image_status": 2,
                        "html": "",
                        "err_txt": "正文部分为空",
                        "xpath_err": 1,
                    }})
                # threshold of 1 means buffered updates flush every iteration
                if len(self.local.get_detail_consumer_list) >= 1:
                    try:
                        self.Jinanggzy_data.bulk_write(self.local.get_detail_consumer_list)
                    except Exception as e:
                        self.m.error("更新失败%s" % len(self.local.get_detail_consumer_list), e)
                    else:
                        self.m.info("更新成功%s" % len(self.local.get_detail_consumer_list))
                    finally:
                        self.local.get_detail_consumer_list.clear()
    def screen_shot_consumer(self):
        """Screenshot consumer: drain ``screen_queue`` and upload a snapshot
        for every queued document that has a usable ``text_xpath``.

        A ``None`` item on the queue is the poison pill that terminates the
        worker loop.
        """
        while True:
            result = self.screen_queue.get()
            if not result:
                # Sentinel pushed by the producer -> shut this worker down.
                print("upload_images结束")
                break
            url, doc_id = result["originalurl"], result["_id"]
            # Older documents may lack "text_xpath"; skip them instead of
            # masking every possible error (previously a bare ``except``).
            text_xpath = result.get("text_xpath")
            if not text_xpath:
                continue
            # NOTE(review): coll_name "lzl_hebi_publish_data" looks copy-pasted
            # from another spider — confirm it should not be the jinanggzy
            # collection.
            upload_image_delete_pic2(url=url, coll_name="lzl_hebi_publish_data", id=doc_id)

    def add_detail_to_queue(self, flag=False):
        """Detail producer: claim pending rows of ``Jinanggzy_data`` one at a
        time and feed them into ``detail_queue``.

        Rows stuck at status 1 (claimed but never finished by an earlier run)
        are first reset to 0 so they are picked up again.  When ``flag`` is
        truthy only a single row is queued (single-shot test mode).  One
        ``None`` sentinel per consumer thread is pushed at the end.
        """
        # Re-release rows a previous run claimed but never processed.
        self.Jinanggzy_data.update_many({'status': 1}, {'$set': {'status': 0}})

        claim_filter = {'$or': [{'status': 0}, {'status': {'$exists': False}}]}
        claim_update = {'$set': {'status': 1, 'status_time': datetime.now()}}
        err_count = 0
        while True:
            try:
                doc = self.Jinanggzy_data.find_one_and_update(
                    claim_filter, claim_update, {},
                    return_document=ReturnDocument.AFTER)
                if not doc:
                    self.m.info("%s:::获取标书详情的生产者结束", datetime.now())
                    break
                self.detail_queue.put(doc)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书详情从库中读取数据err info:%s' % (datetime.now(), err))
                err_count += 1
                time.sleep(3)
                if err_count >= 10:
                    break
        # Poison pills: one per consumer thread.
        for _ in range(self.thread_num):
            self.detail_queue.put(None)

    def add_menu_producer_queue(self, flag=False):
        """Menu-page producer: claim filter rows from ``Jinanggzy_param`` and
        push them onto ``params_queue`` for the list-page consumers.

        Every row is first reset to status 0, then claimed one by one
        (status -> 1).  ``flag`` truthy means single-shot mode.  Finishes by
        pushing one ``None`` sentinel per consumer thread.
        """
        # Reset everything that is not already pending (status 0).
        self.Jinanggzy_param.update_many({'status': {"$ne": 0}}, {'$set': {'status': 0}})

        claim_filter = {'status': 0}
        claim_update = {'$set': {'status': 1}}
        failures = 0
        while True:
            try:
                row = self.Jinanggzy_param.find_one_and_update(
                    claim_filter, claim_update, {},
                    return_document=ReturnDocument.AFTER)
                if not row:
                    self.m.info("%s:::获取标书目录的生产者结束", datetime.now())
                    break
                self.params_queue.put(row)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书目录从库中读取数据err info:%s' % (datetime.now(), err))
                failures += 1
                time.sleep(1)
                if failures >= 10:
                    break
        # Poison pills: one per consumer thread.
        for _ in range(self.thread_num):
            self.params_queue.put(None)

    def screen_shot_product(self, flag=False):
        """Screenshot producer: claim winning-bid announcements that still
        lack a snapshot and queue them for the screenshot workers.

        Tolerates transient DB errors (sleep-and-retry, giving up after the
        counter reaches 5).  Pushes one ``None`` sentinel per consumer thread
        when done.  ``flag`` truthy means single-shot mode.
        """
        self.m.info('%s:::获取截图的生产者线程开启' % datetime.now())
        retries = 0
        # Mark every snapshot-less winning-bid announcement as pending (0).
        self.Jinanggzy_data.update_many(
            {"SnapShot": "", "image_status": {"$ne": 2}, "ifbprogress": "中标公告"},
            {"$set": {"image_status": 0}})
        while True:
            try:
                doc = self.Jinanggzy_data.find_one_and_update(
                    {"SnapShot": "", "image_status": 0, "ifbprogress": "中标公告"},
                    {"$set": {"image_status": 1}},
                    {"_id": 1, "originalurl": 1, "text_xpath": 1})
                if not doc:
                    self.m.info('%s:::截图的生产者线程结束' % datetime.now())
                    break
                self.screen_queue.put(doc)
                if flag:
                    break
            except Exception as err:
                # Check-before-increment keeps the original tolerance of six
                # consecutive failures before aborting.
                if retries >= 5:
                    self.m.error('%s:::截图的生产者异常结束 err info: %s' % (datetime.now(), err))
                    break
                retries += 1
                time.sleep(3)
        for _ in range(self.thread_num):
            self.screen_queue.put(None)

    def run_thread_list(self):
        """Register the producer/consumer callables the base runner spawns.

        ``thread_name_list`` holds single-threaded producers;
        ``more_thread_name_list`` holds consumers that the base class starts
        ``thread_num`` times each.
        """
        self.get_params()  # make sure filter parameters are stored first

        # Single-threaded producers.
        self.thread_name_list = [
            self.add_menu_producer_queue,  # list-page producer
            self.add_detail_to_queue,      # detail producer
            # self.screen_shot_product    # screenshot producer (disabled)
        ]
        # Multi-threaded consumers.
        self.more_thread_name_list = [
            self.get_menu_producer_consumer,  # list-page consumer
            self.get_detail_consumer,         # detail consumer
            # self.screen_shot_consumer      # screenshot consumer (disabled)
        ]

    def run_test(self):
        """Manually exercise individual pipeline stages during development.

        Uncomment the stage you want to debug and run it in isolation.
        A leftover ``import pdb; pdb.set_trace()`` breakpoint was removed —
        it would freeze any non-interactive run of this method.
        """
        # self.get_all_type()  # store all filter categories
        #
        # # self.add_menu_producer_queue(True)  # list producer (single-shot)
        # # self.get_menu_producer_consumer()   # list consumer
        #
        # self.add_detail_to_queue(True)  # detail producer (single-shot)
        # self.get_detail_consumer()      # detail consumer
        #
        # self.screen_shot_product(True)  # screenshot producer (single-shot)
        # self.screen_shot_consumer()     # screenshot consumer


if __name__ == '__main__':
    # db_name = 'test_gu'
    db_name = 'liuzilong'
    # Bind the instance to its own name instead of shadowing the class
    # (the original rebound ``Jinanggzy`` to the spider object, making the
    # class unreachable afterwards).
    spider = Jinanggzy(db_name)
    # spider.get_params()
    # proxy_flag = True
    spider.run()
