#!/usr/bin/python3.8
# -*- coding:UTF-8 -*-
"""
@author:    lizhiheng
@date:      2021/12/27
@software:  PyCharm
@file:      lzl_xianning_publish_data.py
@project:   tender_project
@time:      14:35
@user:      Administrator
"""
import re
from datetime import datetime, timedelta
import logging
import os
import sys
import threading
import time
from hashlib import md5
from queue import Queue
import requests
BASH_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASH_DIR)
from lxml import etree
from pymongo import UpdateOne, ReturnDocument
requests.packages.urllib3.disable_warnings()
from base_spider import BaseSpider
from conf.conf_util import get_all_text, get_file_json, title_strip, upload_image_delete_pic2, \
    sha256_all_text, clear_html
from conf.database import DATABASE
from conf.dber import MongoDBer
from conf.logging_debug import MyLogging

# Spider with batched (bulk) MongoDB writes
class HNZBXH(BaseSpider):
    def __init__(self, db_name):
        """
        Initialize the spider: work queues, MongoDB collections, logger and indexes.

        :param db_name: key into DATABASE selecting the MongoDB connection config.
        """
        super().__init__()
        self.headers = {

            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36",
        }
        self.params_queue = Queue(maxsize=10)  # list-page category queue (filled by add_menu_producer_queue)
        self.detail_queue = Queue(maxsize=100)  # rows whose detail page still needs fetching
        self.index_status = True  # True while collection indexes still need creating
        self.screen_queue = Queue(maxsize=100)  # urls waiting for a screenshot
        self.local = threading.local()  # per-thread bulk-write buffers
        self.db_m = MongoDBer(DATABASE[db_name])  # MongoDB database handle
        self.hnzbxh_param = self.db_m["henanzhaobiaoxiehui_param"]  # crawl-state/category collection
        self.hnzbxh = self.db_m["henanzhaobiaoxiehui_data"]  # scraped announcement data collection
        self.xpaths = ["//div[@class='detail']"]  # xpath of the detail-page body
        # logger (console and file handlers both at INFO)
        self.m = MyLogging(self.__class__.__name__, debug_level=logging.INFO, write_level=logging.INFO).logging
        self.create_indexes()  # ensure collection indexes exist
        self.myself_ip = self.get_myself_ip()  # local machine IP, recorded with each row

    def get_params(self):
        """
        Seed the param collection with the list-page categories to crawl.

        param_dict maps industry -> tag -> [tag, list-page url, unused flag,
        normalized progress name]. Missing category rows are inserted with
        page=1 / status=0; existing rows keep their stored crawl progress.
        """
        param_dict = {
            '招标信息': {
                '招标公告': ['招标公告', 'http://hnzbcg.cn/zhaobiao/list1.html', 0, '招标公告'],
                '中标候选人': ['中标候选人', 'http://hnzbcg.cn/zhaobiao/list7.html', 0, '中标公告'],
                '结果公示': ['结果公示', 'http://hnzbcg.cn/zhaobiao/list8.html', 0, '中标公告'],
                '变更公告': ['变更公告', 'http://hnzbcg.cn/zhaobiao/list6.html', 0, '招标公告'],
            },
        }
        for industry, progress_map in param_dict.items():
            for ifbprogresstag, (_tag, link, _flag, ifbprogress) in progress_map.items():
                filter_ = {
                    "ifbprogress": ifbprogress,
                    "ifbprogresstag": ifbprogresstag,
                    "link": link,
                    "industry": industry,
                }
                # Atomic upsert replaces the original find_one + insert_one pair:
                # a single round trip, and no duplicate rows when two workers race.
                # $setOnInsert leaves existing rows (and their progress) untouched;
                # the equality filter fields are copied into the inserted document.
                self.hnzbxh_param.update_one(
                    filter_,
                    {"$setOnInsert": {"page": 1, "status": 0}},
                    upsert=True,
                )

    def get_all_number_page(self, res, param_result):
        """
        Extract the total page count from a list-page response.

        :param res: raw HTML of the first list page.
        :param param_result: crawl-state row for this category (unused here;
                             kept for interface compatibility).
        :return: total number of pages as an int; 1 when the pager is absent
                 or cannot be parsed.
        """
        try:
            tree = etree.HTML(res)
            page_links = tree.xpath('//div[@class="pages"]/a/@href')
            if not page_links:
                return 1
            # The last pager link looks like ".../listN_<total>.html".
            return int(page_links[-1].split('_')[1].replace('.html', ''))
        except Exception:
            # Malformed/empty HTML or an unexpected pager format: one page.
            # (Narrowed from the original bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            return 1

    def parse_data(self, param_res, param_result):
        """
        Parse one list page into parallel lists of title / url / publish date.

        :param param_res: raw HTML of the list page.
        :param param_result: crawl-state row; when its 'day_flag' is set the
                             spider is in incremental mode and stops at rows
                             older than one day.
        :return: (title_list, originalurl_list, publishdate_list, day_end_flag)
                 where day_end_flag is True when incremental crawling hit an
                 old row and pagination should stop.
        """
        day_end_flag = False
        title_list = []
        originalurl_list = []
        publishdate_list = []
        tree = etree.HTML(param_res)
        for row in tree.xpath('.//ul[@class="detail"]/li'):
            href = row.xpath('.//div[@class="left fontover"]/a/@href')
            originalurl = "http://hnzbcg.cn" + str(href[0].strip()) if href else ''
            originalurl_list.append(originalurl)
            title_nodes = row.xpath('.//div[@class="left fontover"]/a/text()')
            # (removed a leftover debug print of every title)
            title_list.append(title_nodes[0].strip() if title_nodes else '')
            date_nodes = row.xpath('.//div[@class="right"]/text()')
            date_text = date_nodes[0].strip() if date_nodes else '1970-01-01'
            publishdate = datetime.strptime(date_text, "%Y-%m-%d")
            # Incremental mode: anything older than ~1 day was crawled before,
            # so stop here (this row's url/title were already appended; the
            # caller zips the lists, truncating to the shortest, as before).
            if param_result.get('day_flag'):
                if publishdate < datetime.now() - timedelta(days=1):
                    day_end_flag = True
                    break
            publishdate_list.append(publishdate)
        return title_list, originalurl_list, publishdate_list, day_end_flag

    def create_indexes(self):
        """
        Ensure the data collection's indexes exist (runs once per instance).

        Each boolean flag starts True ("index missing") and is cleared when an
        existing index name contains the matching substring.

        NOTE(review): the substring checks can collide — e.g. an
        "image_status" index name also contains "status", and "html_id"
        contains "html" — so a flag may be cleared by an unrelated index and
        the intended index never created; confirm this is acceptable.
        """
        if self.index_status:
            temp_url_index = True
            temp_status_index = True
            industry_status_index = True
            utime_index = True
            html_id_index = True
            html_index = True
            # Scan existing index names; a match means "already created".
            for index in self.hnzbxh.list_indexes():
                if "originalurl" in index["name"]:
                    temp_url_index = False
                    continue
                if "status" in index["name"]:
                    temp_status_index = False
                    continue
                if "industry" in index["name"]:
                    industry_status_index = False
                if "utime" in index["name"]:
                    utime_index = False
                if "html_id" in index["name"]:
                    html_id_index = False
                if "html" in index["name"]:
                    html_index = False
            if temp_url_index:
                # originalurl is the dedup key — unique index.
                self.hnzbxh.create_index([("originalurl", 1)], unique=True, background=True)
            if temp_status_index:
                self.hnzbxh.create_index([("status", 1)], background=True)
            if industry_status_index:
                # Compound indexes covering the screenshot/detail query patterns.
                self.hnzbxh.create_index([("industry", 1), ("SnapShot", 1), ("ifbprogress", 1)],
                                                  background=True)
                self.hnzbxh.create_index([("title", 1), ("ifbprogresstag", 1), ("html_id", 1)],
                                     background=True)
                self.hnzbxh.create_index(
                    [("industry", 1), ("SnapShot", 1), ("ifbprogress", 1), ("image_status", 1)], background=True)
                self.hnzbxh.create_index(
                    [ ("SnapShot", 1), ("ifbprogress", 1) ], background=True)
            if utime_index:
                self.hnzbxh.create_index([("utime", 1)], background=True)
            if html_id_index:
                self.hnzbxh.create_index([("html_id", 1)], background=True)
            if html_index:
                self.hnzbxh.create_index([("html", 1)], background=True)
            self.index_status = False

    def md5_url(self, url):
        m = md5()
        m.update(url.encode())
        return m.hexdigest()

    def save_data(self, title_list, originalurl_list, publishdate_list, page,param_result):
        """
        Buffer one list page worth of rows for bulk insertion.

        Delegates each row to insert_data(), which upserts by originalurl and
        flushes the per-thread buffer in batches.
        """
        industry = param_result['industry']
        ifbprogress = param_result['ifbprogress']
        ifbprogresstag = param_result['ifbprogresstag']
        rows = zip(title_list, originalurl_list, publishdate_list)
        for title, originalurl, publishdate in rows:
            self.insert_data(
                industry, "", ifbprogress, ifbprogresstag, "", "河南省", '', title,
                publishdate, str(publishdate)[:4], '', '河南省招标投标协会', originalurl, '',
                datetime.now(), "", self.myself_ip, "GU", "", "", page,
                weather_have_iframe=0, weather_have_image=0, weather_have_pdf=0,
                weather_have_pdf_type2=0, url_type='html', original_website_id=91,
                weather_have_blank_url=0, weather_have_enclosure=0,
                uuid="", image_status=0)

    def insert_data(self,industry, industryv2, ifbprogress, ifbprogresstag, channelname, province, city, title,
                    publishdate, publishyear, projectno, sourceplatform, originalurl, tenderaddress,
                    ctime, SnapShot, ip, executor, ifbunit, text_xpath, page, weather_have_iframe,
                    weather_have_image,weather_have_pdf,weather_have_pdf_type2,url_type,
                    original_website_id, weather_have_blank_url,weather_have_enclosure,
                    uuid, image_status):
        """
        Buffer one announcement row as an UpdateOne upsert keyed on originalurl,
        flushing the per-thread buffer to MongoDB every 100 operations.

        Parameters map 1:1 onto document fields; ctime is written to both
        ctime and utime. text_xpath and image_status are accepted but not
        stored here (the detail consumer sets them later).
        """
        # Dedup key derived from the url.
        md5_url = self.md5_url(originalurl)
        self.local.insert_data_list.append(UpdateOne(
            {"originalurl": originalurl}, {
                "$set": {
                    "industry": industry, "industryv2": industryv2, "ifbprogress": ifbprogress,
                    "ifbprogresstag": ifbprogresstag, "channelname": channelname,
                    "province": province, "city": city, "title": title_strip(title),
                    "publishdate": publishdate, "publishyear": publishyear,
                    "projectno": projectno, "sourceplatform": sourceplatform,
                    "originalurl": originalurl, "md5_originalurl": md5_url, "tenderaddress": tenderaddress,
                    "ctime": ctime, "SnapShot": SnapShot, "ip": ip, "executor": executor,
                    "utime": ctime, "version_num": 1,
                    "is_parse_html": 0, "ifbunit": ifbunit, "page": page,
                    "weather_have_iframe": weather_have_iframe, "weather_have_image": weather_have_image,
                    "weather_have_pdf": weather_have_pdf, "weather_have_pdf_type2": weather_have_pdf_type2,
                    "url_type": url_type, "original_website_id": original_website_id,
                    "weather_have_enclosure": weather_have_enclosure,
                    "weather_have_blank_url": weather_have_blank_url, "uuid": uuid,
                }
            }, upsert=True
        ))

        # Flush in batches of 100 to limit round trips.
        if len(self.local.insert_data_list) >= 100:
            try:
                self.hnzbxh.bulk_write(self.local.insert_data_list)
            except Exception as e:
                # NOTE(review): the exception is passed as a second positional
                # arg; stdlib logging would treat it as a %-format arg — confirm
                # MyLogging supports this signature.
                self.m.error("插入失败%s条数据" % len(self.local.insert_data_list), e)
            else:
                self.m.info("插入成功%s条数据" % len(self.local.insert_data_list))
            finally:
                self.local.insert_data_list.clear()

    def parse_xunhuan(self,param_result):
        """
        Crawl every list page of one category and persist crawl progress.

        Full-crawl mode resumes from the stored 'page'; once all pages are
        done, 'day_flag' flips the category into incremental mode, where
        crawling restarts at page 1 and stops at the first row older than one
        day. A full pass older than 2 days restarts from page 1.
        """
        # Not yet incremental: resume the full crawl, restarting from page 1
        # if the last full pass started more than 2 days ago.
        if not param_result.get('day_flag'):
            if (datetime.now() - param_result.get('all_start_time', datetime.now() - timedelta(days=2))).days > 2:
                self.hnzbxh_param.update_one({'_id': param_result['_id']},
                                          {'$set': {'all_start_time': datetime.now()}})
                page = 1
            else:
                page = param_result.get('page', 1)
        else:
            page = 1
        # Fetch the first page to learn the total page count.
        # NOTE(review): this request omits self.headers unlike the per-page
        # fetch below — confirm that is intentional.
        param_url = param_result['link']
        param_res = self.send_rquest_get(url=param_url )
        all_number_page = self.get_all_number_page(param_res, param_result)
        print('总页数是%s' % all_number_page)
        while True:
            # Past the last page: the full crawl is complete, switch the
            # category to incremental mode.
            if page > int(all_number_page):
                self.hnzbxh_param.update_one({'_id': param_result['_id']},
                                          {'$set': {'page': page, 'day_flag': True,'all_start_time': datetime.now()}})
                break
            # Page N url: list1.html -> list1_N.html
            param_url = param_result['link'].replace('.html', '_%s.html' % str(page))
            param_res = self.send_rquest_get(url=param_url, headers=self.headers)
            if param_res:
                title_list, originalurl_list, publishdate_list, day_end_flag = self.parse_data(param_res, param_result)
                self.save_data(title_list, originalurl_list, publishdate_list, page,param_result)
                if day_end_flag:
                    # Incremental pass reached already-seen data: reset to
                    # page 1 and stamp the start time.
                    self.hnzbxh_param.update_one({'_id': param_result['_id']},
                                              {'$set': {'day_flag': True,'page':1, 'all_start_time': datetime.now()}})
                    self.m.info('%s 的%s的第%s页增量成功' % (param_result['industry'], param_result['ifbprogress'], page))
                    break
                self.m.info('%s 的%s的第%s页获取数据成功' % (param_result['industry'], param_result['ifbprogress'], page))
            else:
                print('没有数据')
            # Persist the current page so an interrupted run can resume.
            self.hnzbxh_param.update_one({'_id': param_result['_id']}, {'$set': {'page': page, 'status': 1}})
            page = page + 1
        # Category finished: mark it done (status 2).
        self.hnzbxh_param.update_one({'_id': param_result['_id']}, {'$set': {'status': 2, }})
        self.m.info('%s 的%s获取数据完毕' % (param_result['industry'], param_result['ifbprogress']))

    def get_menu_producer_consumer(self):
        """
        Consumer of params_queue: crawl every queued category's list pages.

        A falsy (None) sentinel ends the thread; any rows still buffered in
        the per-thread bulk-write list are flushed before exiting.
        """
        self.local.insert_data_list = []  # per-thread bulk-insert buffer
        while True:
            param_result = self.params_queue.get()
            if not param_result:
                # Flush the leftover buffer. bulk_write raises InvalidOperation
                # on an empty operation list, so only write when something is
                # actually pending (the original logged a spurious error here).
                if self.local.insert_data_list:
                    try:
                        self.hnzbxh.bulk_write(self.local.insert_data_list)
                    except Exception as err:
                        self.m.error("get_data结束后写入缓存数据失败,原因是:%s" % err)
                break
            self.parse_xunhuan(param_result)

    def judge_xpath_err(self,detail_result):
        if (detail_result['industry'] == "工程建设" or detail_result['industry'] == "建设工程" or detail_result['industry'] == "政府采购") and detail_result['ifbprogress'] == "中标公告":
            try:
                SnapShot = detail_result["SnapShot"]
            except:
                SnapShot = ""
            if SnapShot:
                image_status = 2
                xpath_err = 0
            else:
                image_status = 0
                xpath_err = 0
        else:
            image_status = 0
            xpath_err = 0
        return image_status,xpath_err

    def get_detail_consumer(self):
        """
        Consumer of detail_queue: fetch each row's detail page and update its
        html, html_id, TwoLvTitle, utime and status fields via buffered bulk
        writes. A falsy (None) sentinel ends the thread, after which
        screenshot-related SnapShot/image_status fields are normalized.
        """
        self.local.get_detail_consumer_list = []
        while True:
            detail_result = self.detail_queue.get()
            if not detail_result:
                # Final flush of the per-thread buffer.
                # NOTE(review): bulk_write raises InvalidOperation when the
                # buffer is empty, landing in the except branch — probably
                # harmless but worth confirming.
                try:
                    self.hnzbxh.bulk_write(self.local.get_detail_consumer_list)
                    self.m.info("get_detail_consumer结束")
                except Exception as e:
                    self.m.error("更新失败%s" % e)
                else:
                    self.m.debug("更新成功%s条" % len(self.local.get_detail_consumer_list))

                # Normalize screenshot bookkeeping for 中标公告 rows.
                self.hnzbxh.update_many({"SnapShot": {"$exists": False}, "ifbprogress": "中标公告"},
                                    {"$set": {"SnapShot": "", "image_status": 0}})
                self.hnzbxh.update_many({"SnapShot": "", "ifbprogress": "中标公告"}, {"$set": {"image_status": 0}})
                break
            else:
                try:
                    detail_res_ = self.send_rquest_get(url=detail_result['originalurl'], headers=self.headers)
                    if detail_res_:
                        xpath = self.xpaths[0]
                        image_status, xpath_err = self.judge_xpath_err(detail_result)
                        TwoLvTitle = detail_result['title']
                        detail_html = detail_res_
                        # html_id: extracted body text used as a content hash/dedup key.
                        html_id = get_all_text(detail_res_, xpath + "//text()")
                        if html_id:
                            # Does an identical (url, content) row already exist?
                            detail_res = self.hnzbxh.find_one({"originalurl": detail_result['originalurl'],"html_id": html_id})
                            if not detail_res:
                                file_json = get_file_json(xpath, detail_html)
                                self.local.get_detail_consumer_list.append(UpdateOne(
                                    {"_id": detail_result["_id"]},
                                    {"$set": {
                                        "html": clear_html(detail_html),
                                        "html_id": html_id,
                                        "image_status": image_status,
                                        "TwoLvTitle": title_strip(TwoLvTitle),
                                        "utime": datetime.now(),
                                        "text_xpath": xpath,
                                        "xpath_err": xpath_err,
                                        "status": 2,
                                        "originalurl_data_from": {
                                            "url": detail_result['originalurl'],
                                            "method": "get",
                                            "request_only_data": {},
                                            "response_only_data": {}
                                        },
                                        "file_json": file_json,
                                        "Bid_data_acquisition_format": "HTML",
                                    }}
                                ))
                            else:
                                # Duplicate content: close the row without a screenshot.
                                self.hnzbxh.update_one({"_id": detail_result["_id"]
                                                                 }, {"$set": {
                                    "status": 2,
                                    "image_status": 2,
                                    "SnapShot": "该数据为重复数据，无需上传截图",
                                    "xpath_err": 1,
                                }})
                    else:
                        # Empty response body: close the row and flag the xpath error.
                        self.hnzbxh.update_one({"_id": detail_result["_id"]}, {"$set": {
                            "status": 2,
                            "image_status": 2,
                            "html": "",
                            "SnapShot": "正文部分为空",
                            "xpath_err": 1,
                        }})
                    # Flush immediately (threshold of 1 effectively disables batching here).
                    if len(self.local.get_detail_consumer_list) >= 1:
                        try:
                            self.hnzbxh.bulk_write(self.local.get_detail_consumer_list)
                        except Exception as e:
                            # NOTE(review): extra positional arg after a formatted
                            # message — confirm MyLogging.error accepts it.
                            self.m.error("更新失败%s" % len(self.local.get_detail_consumer_list), e)
                        else:
                            self.m.info("更新成功%s" % len(self.local.get_detail_consumer_list))
                        finally:
                            self.local.get_detail_consumer_list.clear()
                except Exception as  err:
                    # NOTE(review): broad catch; the error itself is discarded
                    # and the row is left claimed (status 1) — confirm intended.
                    print('更新详情页报错,url是%s' %detail_result['originalurl'])

    def screen_shot_consumer(self):
        """
        Consumer of screen_queue: upload a screenshot for each queued url and
        attach it to the corresponding document.

        Rows without a text_xpath are skipped (detail parsing has not run
        yet). A falsy (None) sentinel ends the thread.
        """
        while True:
            result = self.screen_queue.get()
            if not result:
                print("upload_images结束")
                break
            # .get replaces the original bare try/except around the key lookup;
            # also avoids shadowing the builtin `id`.
            text_xpath = result.get("text_xpath")
            if not text_xpath:
                continue
            # NOTE(review): coll_name looks copy-pasted from another spider
            # ("lzl_xianning_publish_data" vs this file's
            # "henanzhaobiaoxiehui_data") — confirm which collection
            # upload_image_delete_pic2 should target.
            upload_image_delete_pic2(url=result["originalurl"], coll_name="lzl_xianning_publish_data",
                                     id=result["_id"])

    def add_detail_to_queue(self,flag=False):
        """
        Producer for detail_queue: claim pending rows (status 0 or missing)
        from the data collection and queue them for the detail consumers.

        Rows stuck in status 1 for over two minutes are first reset to 0 so a
        crashed worker's claims get retried. Finishes by pushing one None
        sentinel per consumer thread. With flag=True only one row is queued
        (smoke-test mode).
        """
        # Release claims that went stale (worker died mid-fetch).
        self.hnzbxh.update_many(
            {'status': 1, 'status_time': {'$lt': datetime.now() - timedelta(minutes=2)}},
            {'$set': {'status': 0}})
        claim_filter = {'$or': [{'status': 0}, {'status': {'$exists': False}}]}
        claim_update = {'$set': {'status': 1, 'status_time': datetime.now()}}
        failures = 0
        while True:
            try:
                data = self.hnzbxh.find_one_and_update(
                    claim_filter, claim_update, {}, return_document=ReturnDocument.AFTER)
                if not data:
                    self.m.info("%s:::获取标书详情的生产者结束", datetime.now())
                    break
                self.detail_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书详情从库中读取数据err info:%s' % (datetime.now(), err))
                failures += 1
                time.sleep(3)
                if failures >= 10:
                    break
        for _ in range(self.thread_num):
            self.detail_queue.put(None)

    def add_menu_producer_queue(self,flag=False):
        """
        Producer for params_queue: reset every category row to status 0, then
        claim them one at a time (status 0 -> 1) and queue each for the
        list-page consumers.

        Finishes by pushing one None sentinel per consumer thread. With
        flag=True only one category is queued (smoke-test mode).
        """
        # Start a fresh round: mark every category as pending.
        self.hnzbxh_param.update_many({'status': {"$ne": 0}}, {'$set': {'status': 0}})
        claim_filter = {'status': 0}
        claim_update = {'$set': {'status': 1}}
        failures = 0
        while True:
            try:
                data = self.hnzbxh_param.find_one_and_update(
                    claim_filter, claim_update, {}, return_document=ReturnDocument.AFTER)
                if not data:
                    self.m.info("%s:::获取标书目录的生产者结束", datetime.now())
                    break
                self.params_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书目录从库中读取数据err info:%s' % (datetime.now(), err))
                failures += 1
                time.sleep(1)
                if failures >= 10:
                    break
        for _ in range(self.thread_num):
            self.params_queue.put(None)

    def screen_shot_product(self,flag=False):
        """
        Producer for screen_queue: claim 中标公告 rows still needing a
        screenshot (SnapShot empty, image_status 0 -> 1) and queue their
        _id, originalurl and text_xpath.

        Pushes one None sentinel per consumer thread when done. With
        flag=True only one row is queued (smoke-test mode).
        """
        self.m.info('%s:::获取截图的生产者线程开启' % datetime.now())
        failures = 0
        # Re-arm rows whose screenshot never completed.
        self.hnzbxh.update_many(
            {"SnapShot": "", "image_status": {"$ne": 2}, "ifbprogress": "中标公告"},
            {"$set": {"image_status": 0}})
        while True:
            try:
                one_data = self.hnzbxh.find_one_and_update(
                    {"SnapShot": "", "image_status": 0, "ifbprogress": "中标公告"},
                    {"$set": {"image_status": 1}},
                    {"_id": 1, "originalurl": 1, "text_xpath": 1})
                if not one_data:
                    self.m.info('%s:::截图的生产者线程结束' % datetime.now())
                    break
                self.screen_queue.put(one_data)
                if flag:
                    break
            except Exception as err:
                # Give up after 5 consecutive failures.
                if failures >= 5:
                    self.m.error('%s:::截图的生产者异常结束 err info: %s' % (datetime.now(), err))
                    break
                failures += 1
                time.sleep(3)
        for _ in range(self.thread_num):
            self.screen_queue.put(None)

    def run_thread_list(self):
        """
        Register producer and consumer callables for the base-class runner.

        Seeds the category/param collection first. The screenshot
        producer/consumer pair is currently disabled.
        """
        self.get_params()
        # Producer threads.
        self.thread_name_list = [
            self.add_menu_producer_queue,
            self.add_detail_to_queue,
        ]
        # Consumer threads.
        self.more_thread_name_list = [
            self.get_menu_producer_consumer,
            self.get_detail_consumer,
        ]

if __name__ == '__main__':
    # Target database key; switch to 'test_gu' for a test run.
    spider = HNZBXH('liuzilong')
    spider.run()