#!/usr/bin/python3.8
# -*- coding:UTF-8 -*-
"""
@author:    GU
@date:      2022/2/23
@software:  PyCharm
@file:      jiangxizhaobiaotoubiao.py
@project:   tender_project
@time:      15:45
@user:      Administrator
"""
import logging
import os
import re
import sys
import threading
import time
from datetime import datetime, timedelta
from hashlib import md5
from queue import Queue
from lxml import etree
from pymongo import UpdateOne, ReturnDocument

BASH_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASH_DIR)
from tender_project.conf.logging_debug import MyLogging
from tender_project.conf.conf_util import title_strip, get_all_text, upload_image_delete_pic2, remove_js_style
from tender_project.base_spider import BaseSpider
from tender_project.conf.database import DATABASE
from tender_project.conf.dber import MongoDBer
from tender_project.conf.conf_util import sha256_all_text


class JiangXi(BaseSpider):
    """Spider for the Jiangxi tendering & bidding site (www.jxtb.org.cn)."""

    def __init__(self, db_name):
        """
        Wire up HTTP defaults, work queues, MongoDB collections, logging
        and compiled regexes, then ensure the collection indexes exist.

        :param db_name: key into DATABASE selecting the Mongo connection.
        """
        super(JiangXi, self).__init__()
        self.url = "http://www.jxtb.org.cn/gongshigg/zhaobiaogg/"
        self.headers = {
            'User-Agent': 'Mozilla / 5.0(Windows NT 10.0;Win64;x64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 103.0.0.0Safari / 537.36', }
        self.url1 = "https://midd.jianshequan.com"
        self.params_queue = Queue(maxsize=10)  # queue of category (filter) rows
        self.detail_queue = Queue(maxsize=100)  # queue of detail rows to refresh
        self.index_status = True  # whether indexes still need creating
        self.data_status = False
        self.screen_queue = Queue(maxsize=1000)  # queue of URLs to screenshot
        self.url_status = False  # whether all URLs have been consumed
        self.finished = False  # whether all params have been consumed
        self.uuid_queue = Queue(maxsize=1000)  # queue of uuids
        self.local = threading.local()  # per-thread bulk-write buffers
        self.db_m = MongoDBer(DATABASE[db_name])  # MongoDB connection object
        self.jx_params = self.db_m["jiangxizhaobiaotoubiao_params"]  # category table
        self.jx = self.db_m["jiangxizhaobiaotoubiao"]  # detail / listing table
        self.m = MyLogging(self.__class__.__name__, debug_level=logging.INFO, write_level=logging.INFO).logging
        self.page_rule = re.compile(",'(\d*)'")
        self.title_rule = re.compile("(\[.*\])")
        self.xpaths = ["//div[@id='vsb_content_2']"]  # candidate body xpaths
        self.file_type_rule = re.compile("\.(.*)")
        self.myself_ip = self.get_myself_ip()
        self.create_indexes()  # create collection indexes once

    def get_page_list_consumer(self):
        """
        Consumer thread for category listings.

        Drains ``params_queue`` and walks every page of each category via
        page_xunhuan(); a falsy sentinel flushes any buffered bulk writes
        and ends the thread.
        """
        self.local.insert_data_list = []
        while True:
            if self.params_queue.qsize() == 0:
                self.m.info('筛选项队列为空,休息5s')
                time.sleep(5)
                continue
            task = self.params_queue.get()
            if task:
                self.page_xunhuan(task)
                continue
            # Sentinel received: flush whatever insert_data() buffered.
            try:
                if self.local.insert_data_list:
                    self.jx.bulk_write(self.local.insert_data_list)
            except Exception as e:
                self.m.error("get_page_list_consumer结束后写入缓存数据失败,原因是:%s" % e)
            else:
                self.m.debug("插入成功%s条" % len(self.local.insert_data_list))
            self.m.info("get_page_list_consumer结束")
            break

    def get_all_page_number(self, url):
        """
        Return the total page count shown on a category listing page.

        :param url: category listing URL.
        :return: total number of pages as an int.
        """
        html = self.send_rquest_get(url)
        tree = etree.HTML(html)
        raw = "".join(tree.xpath("//div[@id='pages']/b[2]/text()"))
        return int(raw.replace('/', ''))

    def page_xunhuan(self, result):
        """
        Walk every page of one category and persist its listings.

        Resumes from the stored ``page`` in incremental mode
        (``day_flag`` truthy); in full-crawl mode, restarts from page 1
        once ``all_start_time`` is more than two days old.

        :param result: the category document from jx_params.
        :raises Exception: when a listing page returns no response.
        """
        # Decide the starting page from day_flag / all_start_time.
        if not result.get('day_flag'):
            # A missing all_start_time defaults to exactly 2 days ago, so
            # the "> 2" test is False and the stored page is resumed.
            if (datetime.now() - result.get('all_start_time', datetime.now() - timedelta(days=2))).days > 2:
                self.jx_params.update_one({'_id': result['_id']},
                                          {'$set': {'all_start_time': datetime.now() - timedelta(days=3)}})
                page = 1
            else:
                page = result.get('page', 1)
        else:
            page = result.get('page', 1)
        # Total number of pages in this category.
        all_number_page = self.get_all_page_number(result["ifbprogresstag_url"])
        while True:
            if page == 1:
                url = result["ifbprogresstag_url"]
            else:
                url = result["ifbprogresstag_url"] + "?p-%s.html" % page
            response = self.send_rquest_get(url=url, headers=self.headers)
            if not response:
                raise Exception('该页状态码异常')
            # Past the last page: record progress and switch the category
            # to incremental (day_flag) mode.
            if page > all_number_page:
                self.jx_params.update_one({'_id': result['_id']}, {
                    '$set': {'page': page, 'day_flag': True, 'all_start_time': datetime.now() - timedelta(days=1)}})
                break
            self.m.info('获取 %s第%s页数据成功' % (result["ifbprogresstag_text"], page))
            industry_list, title_list, originalurl_list, publishdate_list, day_end_flag = self.parse_data(response,
                                                                                                          result)
            self.save_data(industry_list, title_list, originalurl_list, publishdate_list, result["ifbprogresstag_text"],
                           page, result["_id"])
            # Persist the current page so an interrupted run can resume.
            self.jx_params.update_one({'_id': result["_id"]}, {'$set': {'page': page}})
            if day_end_flag:
                # Incremental cutoff hit: reset to page 1 and stamp the time.
                self.jx_params.update_one({'_id': result['_id']}, {
                    '$set': {'day_flag': True, "page": 1, 'all_start_time': datetime.now() - timedelta(days=1)}})
                self.m.info(
                    "%s:::ifbprogresstag_text:::%s 增量爬虫结束" % (datetime.now(), result["ifbprogresstag_text"]))
                break
            page = page + 1
        self.jx_params.update_one({'_id': result["_id"]}, {'$set': {'status': 2}})

    def parse_data(self, response, result):
        """
        Parse one listing page into four parallel lists.

        Fix: the old code appended the title (and industry) before
        validating the URL, so an ``<li>`` without an href left the lists
        different lengths and misaligned every later title/URL pair. All
        four lists are now appended together, only for fully valid items.

        :param response: raw HTML of the listing page.
        :param result: the category document; a truthy ``day_flag``
            enables the 5-day incremental cutoff.
        :return: (industry_list, title_list, originalurl_list,
            publishdate_list, day_end_flag)
        """
        res = etree.HTML(response)
        lis = res.xpath("//div[@class='newlist']/ul/li")
        industry_list, title_list = [], []
        originalurl_list, publishdate_list = [], []
        day_end_flag = False
        for li in lis:
            # Normalise ASCII brackets to full-width to match stored titles.
            title = "".join(li.xpath("./a/text()")).replace("(", "（").replace(")", "）") \
                .replace("[", "【").replace("]", "】")
            if not title:
                continue
            originalurl = "".join(li.xpath("./a/@href"))
            if not originalurl:
                continue
            publishdate_str = "".join(li.xpath("./span/text()"))
            publishdate = datetime.strptime(publishdate_str, "%Y-%m-%d")
            # Incremental mode: stop at the first entry older than 5 days
            # and exclude it entirely.
            if result.get('day_flag') and publishdate < datetime.now() - timedelta(days=5):
                day_end_flag = True
                break
            if "工程" in title or "项目" in title or "建设" in title or "改建" in title:
                industry_list.append("工程建设")
            else:
                industry_list.append("政府采购")
            title_list.append(title)
            originalurl_list.append("http://www.jxtb.org.cn/" + originalurl)
            publishdate_list.append(publishdate)
        return industry_list, title_list, originalurl_list, publishdate_list, day_end_flag

    def md5_url(self, url):
        """
        md5 url
        """
        m = md5()
        m.update(url.encode())
        return m.hexdigest()

    def insert_data(self, **kwargs):
        """
        Buffer one listing row as an upsert keyed on ``originalurl`` and
        flush the thread-local buffer to Mongo once it holds 100 entries.

        :param kwargs: all listing fields produced by save_data().
        """
        originalurl = kwargs["originalurl"]
        fields = {
            "industry": kwargs["industry"],
            "industryv2": kwargs["industryv2"],
            "ifbprogress": kwargs["ifbprogress"],
            "ifbprogresstag": kwargs["ifbprogresstag"],
            "channelname": kwargs["channelname"],
            "province": kwargs["province"],
            "city": kwargs["city"],
            "agent": kwargs["agent"],
            "title": title_strip(kwargs["title"]),
            "publishdate": kwargs["publishdate"],
            "publishyear": kwargs["publishyear"],
            "projectno": kwargs["projectno"],
            "sourceplatform": kwargs["sourceplatform"],
            "plan_number": kwargs["plan_number"],
            "originalurl": originalurl,
            "md5_originalurl": self.md5_url(originalurl),
            "tenderaddress": kwargs["tenderaddress"],
            "createdate": kwargs["createdate"],
            "ip": kwargs["ip"],
            "executor": kwargs["executor"],
            "country": kwargs["country"],
            # update_time mirrors createdate on every (re-)insert.
            "update_time": kwargs["createdate"],
            "version_num": 1,
            "is_parse_html": 0,
            "page": kwargs["page"],
            "weather_have_iframe": kwargs["weather_have_iframe"],
            "weather_have_image": kwargs["weather_have_image"],
            "weather_have_pdf": kwargs["weather_have_pdf"],
            "weather_have_pdf_type2": kwargs["weather_have_pdf_type2"],
            "url_type": kwargs["url_type"],
            "original_website_id": kwargs["original_website_id"],
            "weather_have_enclosure": kwargs["weather_have_enclosure"],
            "weather_have_blank_url": kwargs["weather_have_blank_url"],
        }
        self.local.insert_data_list.append(
            UpdateOne({"originalurl": originalurl}, {"$set": fields}, upsert=True))
        if len(self.local.insert_data_list) >= 100:
            buffered = self.local.insert_data_list
            try:
                self.jx.bulk_write(buffered)
            except Exception as e:
                self.m.error("插入失败%s条数据%s" % (len(buffered), e))
            else:
                self.m.info("插入成功%s条数据" % len(buffered))
            finally:
                buffered.clear()

    def create_indexes(self):
        """
        Ensure the detail collection's indexes exist; guarded by
        ``self.index_status`` so it runs at most once per instance.
        """
        if self.index_status:
            temp_url_index = True
            temp_status_index = True
            industry_status_index = True
            update_time_index = True
            html_id_index = True
            # Substring matching against existing index names decides
            # which indexes are still missing.
            # NOTE(review): "status" also matches names like
            # "image_status_1", and the "industry"/"update_time"/"html_id"
            # checks fall through without `continue`, so one compound
            # index name can clear several flags — confirm intended.
            for index in self.jx.list_indexes():
                if "originalurl" in index["name"]:
                    temp_url_index = False
                    continue
                if "status" in index["name"]:
                    temp_status_index = False
                    continue
                if "industry" in index["name"]:
                    industry_status_index = False
                if "update_time" in index["name"]:
                    update_time_index = False
                if "html_id" in index["name"]:
                    html_id_index = False
            if temp_url_index:
                # Unique key for upserts keyed on the announcement URL.
                self.jx.create_index([("originalurl", 1)], unique=True, background=True)
            if temp_status_index:
                self.jx.create_index([("status", 1)], background=True)
            if industry_status_index:
                self.jx.create_index([("industry", 1), ("SnapShot", 1), ("ifbprogress", 1)],
                                     background=True)
                self.jx.create_index(
                    [("industry", 1), ("SnapShot", 1), ("ifbprogress", 1), ("image_status", 1)], background=True)
            if update_time_index:
                self.jx.create_index([("update_time", 1)], background=True)
            if html_id_index:
                # Supports the duplicate-content lookup in judge_html_().
                self.jx.create_index([("title", 1), ("ifbprogresstag", 1), ("html_id", 1)],
                                     background=True)
        self.index_status = False

    def judge_ifbprogress(self, ifbprogresstag_text):
        """
        判断ifbprogress
        """
        if "招标" in ifbprogresstag_text:
            ifbprogress = "招标公告"
        else:
            ifbprogress = "中标公告"
        return ifbprogress

    def save_data(self, industry_list, title_list, originalurl_list, publishdate_list, ifbprogresstag_text, page, _id):
        """
        Buffer every row of one listing page through insert_data().

        The four lists are consumed in lockstep; zip() truncates to the
        shortest one.

        :param _id: the category document id, stored on each row as ``id``.
        """
        # The announcement type depends only on the (fixed) category text,
        # so compute it once outside the loop.
        ifbprogress = self.judge_ifbprogress(ifbprogresstag_text)
        rows = zip(industry_list, title_list, originalurl_list, publishdate_list)
        for industry, title, originalurl, publishdate in rows:
            self.insert_data(
                industry=industry, industryv2="", ifbprogress=ifbprogress,
                ifbprogresstag=ifbprogresstag_text, channelname="",
                province="江西省", city="", title=title,
                publishdate=publishdate, publishyear=publishdate.year,
                projectno="", sourceplatform="江西招投标网",
                originalurl=originalurl, tenderaddress="江西省",
                createdate=datetime.now(), ip=self.myself_ip, executor="GU",
                ifbunit="", agent="", page=page, weather_have_iframe=0,
                weather_have_image=0, weather_have_enclosure=0,
                weather_have_pdf=0, weather_have_pdf_type2=0, url_type=1,
                original_website_id=61, weather_have_blank_url=0,
                image_status=0, status=0, plan_number="", country="", id=_id)

    def get_TwoLvTitle(self, res):
        """
        Extract the detail page's secondary title, normalising ASCII
        brackets to their full-width equivalents.

        :param res: raw page HTML.
        :return: normalised title string ('' when the node is absent).
        """
        tree = etree.HTML(res)
        text = "".join(tree.xpath("//div[@class='title']/text()"))
        for halfwidth, fullwidth in (("(", "（"), (")", "）"), ("[", "【"), ("]", "】")):
            text = text.replace(halfwidth, fullwidth)
        return text

    def parse_file(self, res):
        """
        Collect attachment links from the announcement body.

        Fix: the old pattern ``self.file_type_rule`` matched greedily from
        the URL's *first* dot, so ``file_type`` came out as the whole
        "jxtb.org.cn/..." tail instead of the extension. The extension is
        now taken from the final path segment's last dot.

        :param res: raw page HTML.
        :return: ``{"files": [{"file_name", "file_url", "file_type"}, ...]}``
            or ``""`` when the page has no attachments.
        """
        tree = etree.HTML(res)
        names = tree.xpath("//div[@class='v_news_content']/p/a/text()")
        urls = ["http://www.jxtb.org.cn/" + href
                for href in tree.xpath("//div[@class='v_news_content']/p/a/@href")]
        files = []
        for name, url in zip(names, urls):
            tail = url.rsplit("/", 1)[-1]
            # Extension = text after the last dot of the file name part.
            file_type = tail.rsplit(".", 1)[-1] if "." in tail else ""
            files.append({"file_name": name, "file_url": url, "file_type": file_type})
        return {"files": files} if files else ""

    def parse_xpath(self, html, xpath):
        """
        Check whether *xpath* is usable on *html*.

        :return: True when the xpath selects exactly one node that
            serialises to real markup, else False.
        """
        nodes = etree.HTML(html).xpath(xpath)
        if len(nodes) != 1:
            return False
        fragment = etree.tostring(nodes[0], encoding="utf-8").decode("utf-8")
        return "<" in fragment and ">" in fragment

    def judge_xpath(self, res):
        """
        Return the first configured body xpath that matches *res*, or ''
        when none of ``self.xpaths`` applies.
        """
        return next((xp for xp in self.xpaths if self.parse_xpath(res, xp)), "")

    def judge_xpath_err(self, text_xpath, result):
        """
        判断xpath_err
        """
        if text_xpath:
            industry = result["industry"]
            ifbprogress = result["ifbprogress"]
            if ifbprogress == "中标公告":
                try:
                    SnapShot = result["SnapShot"]
                except:
                    SnapShot = ""
                if SnapShot:
                    image_status = 2
                    xpath_err = 0
                else:
                    image_status = 0
                    xpath_err = 0
            else:
                image_status = 0
                xpath_err = 0
        else:
            xpath_err = 1
            image_status = 0
        return xpath_err, image_status

    def get_all_text(self, res):
        """
        获取页面主体部分的纯文本
        """
        try:
            re_rule = re.compile("[\u4e00-\u9fa5]")
            all_text = "".join(re_rule.findall(res))
            if all_text and len(all_text) > 50:
                html_id = sha256_all_text(all_text)
                return html_id
            return ""
        except:
            return ""

    def judge_html_(self, html, text_xpath, title, ifbprogresstag):
        """
        Compute the page-body hash and check it against stored documents.

        :param html: raw page HTML.
        :param text_xpath: accepted but not used by this implementation.
        :param title: title used in the duplicate lookup.
        :param ifbprogresstag: category tag used in the duplicate lookup.
        :return: the html_id string when no matching document exists;
            False when a (title, tag, hash) duplicate is found; implicitly
            None when the page text is too short to hash.
        """
        # NOTE(review): the caller compares the result against '' — both
        # the False and the implicit None return pass that check, so
        # duplicates / empty pages may still be written; confirm whether
        # '' should be returned in those cases instead.
        # Hash of the page's plain Chinese text.
        html_id = self.get_all_text(html)
        if html_id:
            result = self.jx.find_one(
                {"title": title, "ifbprogresstag": ifbprogresstag, "html_id": html_id})
            if result:
                return False
            else:
                return html_id

    def update_detail(self, result):
        """
        Fetch one detail page and persist its derived fields.

        Derives the secondary title, attachment list, matching body xpath
        and content hash, then buffers a bulk update; pages whose body
        cannot be hashed are marked done (status 2) with an error
        snapshot. The buffer flushes on every call (threshold >= 1).

        :param result: the listing document to refresh.
        """
        # Fetch the original announcement page.
        res = self.send_rquest_get(url=result["originalurl"], headers=self.headers)
        if res:
            TwoLvTitle = self.get_TwoLvTitle(res)
            file_json = self.parse_file(res)
            text_xpath = self.judge_xpath(res)
            xpath_err, image_status = self.judge_xpath_err(text_xpath, result)
            if not text_xpath:
                self.m.error("xpath不全，请查看%s" % result["originalurl"])
            try:
                # judge_html_ ignores its xpath argument; "//text()" is
                # appended only to satisfy the signature.
                html_id = self.judge_html_(res, text_xpath + "//text()", result["title"], result["ifbprogresstag"])
            except Exception as e:
                html_id = ''
                self.m.error('获取html_id失败 ,报错内容是%s ' % e)
                # Mark the row finished-with-error so it is not retried.
                self.jx.update_one({"_id": result["_id"]},
                                   {"$set": {
                                       "status": 2,
                                       "image_status": 2,
                                       "SnapShot": "正文为空",
                                       "xpath_err": 1,
                                       "file_json": file_json
                                   }})

            if html_id != '':
                tender_unid = result.get("tender_unid", "")
                self.local.get_detail_consumer_list.append(UpdateOne(
                    {"_id": result["_id"]},
                    {"$set":
                         {"html": res,
                          "TwoLvTitle": title_strip(TwoLvTitle),
                          "html_id": html_id,
                          "image_status": image_status,
                          "update_time": datetime.now(),
                          "text_xpath": text_xpath,
                          "xpath_err": 0,
                          "is_parse_html": 0,
                          "status": 2,
                          "SnapShot": "",
                          "tender_unid": tender_unid,
                          # NOTE(review): host looks copied from another
                          # spider — this site is www.jxtb.org.cn; confirm.
                          "sourceplatform_hosts": "www.ahggzyjt.com",
                          "originalurl_data_from": {
                              "url": result["originalurl"],
                              "method": "get",
                              "request_only_data": {},
                              "response_only_data": {}
                          },
                          "Bid_data_acquisition_format": "HTML",
                          "file_json": file_json,
                          }
                     }))
            else:
                self.jx.update_one({"_id": result["_id"]}, {"$set": {
                    "status": 2,
                    "image_status": 2,
                    "SnapShot": "正文为空",
                    "xpath_err": 1,
                    "file_json": file_json,
                }})
        # Threshold of 1 means the buffer is flushed on every call.
        if len(self.local.get_detail_consumer_list) >= 1:
            try:
                self.jx.bulk_write(self.local.get_detail_consumer_list)
            except Exception as e:
                self.m.error("更新失败%s,%s" % (len(self.local.get_detail_consumer_list), e))
            else:
                self.m.info("更新成功%s" % len(self.local.get_detail_consumer_list))
            finally:
                self.local.get_detail_consumer_list.clear()

    def get_detail_consumer(self):
        """
        Consumer thread for detail pages.

        Blocks on ``detail_queue`` and refreshes each document via
        update_detail(); a falsy sentinel flushes the buffered bulk
        updates and ends the thread.
        """
        self.local.get_detail_consumer_list = []
        while True:
            doc = self.detail_queue.get()
            if doc:
                self.update_detail(doc)
                continue
            # Sentinel received: flush any buffered updates and stop.
            try:
                self.jx.bulk_write(self.local.get_detail_consumer_list)
            except Exception as e:
                self.m.error("更新失败%s" % e)
            else:
                self.m.debug("更新成功%s条" % len(self.local.get_detail_consumer_list))
            self.m.info("get_detail_consumer结束")
            break

    def add_page_list_product(self, flag=False):
        """
        Producer for category listings.

        Resets every category row to status 0, then claims them one by
        one (status 0 -> 1) and pushes each onto ``params_queue``; pushes
        one None sentinel per consumer thread when done.

        :param flag: when True, stop after a single row (test mode).
        """
        # Fresh run: re-arm every category.
        self.jx_params.update_many({'status': {"$ne": 0}}, {'$set': {'status': 0}})
        claim_filter = {'status': 0}
        claim_update = {'$set': {'status': 1}}
        errors = 0
        while True:
            try:
                row = self.jx_params.find_one_and_update(
                    claim_filter, claim_update, {}, return_document=ReturnDocument.AFTER)
                if not row:
                    self.m.info("%s:::获取标书目录的生产者结束", datetime.now())
                    break
                self.params_queue.put(row)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书目录从库中读取数据err info:%s' % (datetime.now(), err))
                errors += 1
                time.sleep(1)
                if errors >= 10:
                    break
        for _ in range(self.thread_num):
            self.params_queue.put(None)

    def add_detail_to_queue(self, flag=False):
        """
        Producer for detail pages.

        Releases claims older than two hours, then claims rows with
        status 0 (or no status) and pushes them onto ``detail_queue``;
        pushes one None sentinel per consumer thread when done.

        :param flag: when True, stop after a single row (test mode).
        """
        # Rows claimed more than two hours ago are assumed abandoned.
        self.jx.update_many(
            {'status': 1, 'status_time': {'$lt': datetime.now() - timedelta(hours=2)}},
            {'$set': {'status': 0}})
        claim_filter = {'$or': [{'status': 0}, {'status': {'$exists': False}}]}
        # Evaluated once: every claim in this run gets the same stamp.
        claim_update = {'$set': {'status': 1, 'status_time': datetime.now()}}
        errors = 0
        while True:
            try:
                row = self.jx.find_one_and_update(
                    claim_filter, claim_update, {}, return_document=ReturnDocument.AFTER)
                if not row:
                    self.m.info("%s:::获取标书详情的生产者结束", datetime.now())
                    break
                self.detail_queue.put(row)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书详情从库中读取数据err info:%s' % (datetime.now(), err))
                errors += 1
                time.sleep(3)
                if errors >= 10:
                    break
        for _ in range(self.thread_num):
            self.detail_queue.put(None)

    def screen_shot_product(self, flag=False):
        """
        Producer for screenshots.

        Re-arms award notices that still lack a snapshot, then claims
        them one by one (image_status 0 -> 1) and pushes each onto
        ``screen_queue``; pushes one None sentinel per consumer thread.

        :param flag: when True, stop after a single row (test mode).
        """
        self.m.info('%s:::获取截图的生产者线程开启' % datetime.now())
        errors = 0
        # Reset any row whose snapshot is still empty (unless flagged 2).
        self.jx.update_many({"SnapShot": "", "image_status": {"$ne": 2}, "ifbprogress": "中标公告"},
                            {"$set": {"image_status": 0}})
        while True:
            try:
                row = self.jx.find_one_and_update(
                    {"SnapShot": "", "image_status": 0, "ifbprogress": "中标公告"},
                    {"$set": {"image_status": 1}},
                    {"_id": 1, "originalurl": 1, "text_xpath": 1})
                if not row:
                    self.m.info('%s:::截图的生产者线程结束' % datetime.now())
                    break
                self.screen_queue.put(row)
                if flag:
                    break
            except Exception as err:
                # Give up after the sixth consecutive error (counter is
                # checked before being incremented, matching prior runs).
                if errors >= 5:
                    self.m.error('%s:::截图的生产者异常结束 err info: %s' % (datetime.now(), err))
                    break
                errors += 1
                time.sleep(3)

        for _ in range(self.thread_num):
            self.screen_queue.put(None)

    def screen_shot_consumer(self):
        """
        Consumer thread: screenshot each queued announcement URL and
        upload the image; a falsy sentinel ends the thread.
        """
        while True:
            if self.screen_queue.qsize() == 0:
                self.m.info('%s:::截图的队列为空休息5s' % datetime.now())
                time.sleep(5)
                continue
            row = self.screen_queue.get()
            if not row:
                self.m.info("screen_shot_consumer结束")
                break
            self.m.info('%s:::url : %s 开始截图' % (datetime.now(), row["originalurl"]))
            upload_image_delete_pic2(row["originalurl"], self.jx, row['_id'])

    def get_all_type(self):
        """
        Seed the category table.

        Scrapes every category tab (text + URL) from the announcement
        index page and upserts unseen ones with status 0.
        """
        tree = etree.HTML(self.send_rquest_get(url=self.url, headers=self.headers))
        tag_texts = tree.xpath("//div[@class='nynav']/ul/li/a/text()")
        tag_urls = ["http://www.jxtb.org.cn/" + href
                    for href in tree.xpath("//div[@class='nynav']/ul/li/a/@href")]

        for tag_text, tag_url in zip(tag_texts, tag_urls):
            known = self.jx_params.find_one(
                {"ifbprogresstag_text": tag_text, "ifbprogresstag_url": tag_url, })
            if known is not None:
                continue
            doc = {
                "ifbprogresstag_text": tag_text,
                "ifbprogresstag_url": tag_url,
                'status': 0
            }
            try:
                # Upsert keyed on the full document to avoid duplicates.
                self.jx_params.update_one(doc, {'$set': doc}, upsert=True)
            except Exception as e:
                self.m.error("插入params到数据库中失败%s" % e)

    def run_thread_list(self):
        """
        Seed the category table, then register the producer/consumer
        callables for the BaseSpider runner (the screenshot pair stays
        disabled).
        """
        self.get_all_type()
        # Producers, started once each.
        self.thread_name_list = [
            self.add_page_list_product,
            self.add_detail_to_queue,
        ]
        # Consumers, started per worker thread.
        self.more_thread_name_list = [
            self.get_page_list_consumer,
            self.get_detail_consumer,
        ]

    def run_test(self):
        """
        Manual smoke test: exercise individual pipeline stages by hand.
        Uncomment one producer/consumer pair at a time.
        """
        # Fix: removed the leftover ``pdb.set_trace()`` breakpoint, which
        # froze any non-interactive invocation of this helper.
        self.get_all_type()  # seed all category rows

        # self.add_page_list_product(True)  # listing producer
        # self.get_page_list_consumer()  # listing consumer

        # self.add_detail_to_queue(True)  # detail producer
        # self.get_detail_consumer()  # detail consumer

        # self.screen_shot_product(True)  # screenshot producer
        # self.screen_shot_consumer()  # screenshot consumer


if __name__ == '__main__':
    # Entry point: build the spider against the configured database and
    # start the full producer/consumer pipeline.
    # db_name = 'test_gu'
    db_name = 'lizhiheng_db2'
    spider = JiangXi(db_name=db_name)
    spider.m.info("开始")
    spider.run()
    # spider.run_test()  # manual single-stage smoke test