#!/usr/bin/python3.8
# -*- coding:UTF-8 -*-
import hashlib
import random
import re
from datetime import datetime, timedelta
import logging
import os
import sys
import threading
import time
from hashlib import md5
from queue import Queue

import requests

# Walk three directories up from this file and put the project root on
# sys.path so the `tender_project` package imports below resolve no matter
# where the script is launched from.
BASH_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASH_DIR)
from lxml import etree, html
from pymongo import UpdateOne, ReturnDocument

from tender_project. base_spider import BaseSpider
from tender_project.conf.conf_util import  title_strip, upload_image_delete_pic2, \
    sha256_all_text, clear_html, parse_xpath, judge_xpath
from tender_project.conf.database import DATABASE
from tender_project.conf.dber import MongoDBer
from tender_project.conf.logging_debug import MyLogging


# Batch update (rows are upserted into MongoDB in bulk)
class guangdongshengshuiwujuW(BaseSpider):
    """Spider for the Guangdong Provincial Tax Service "major tax violation
    cases" list (guangdong.chinatax.gov.cn).

    List pages are paged through and upserted into MongoDB in bulk; detail
    pages are then fetched by a producer/consumer thread pool and the html /
    extracted-text fields are updated in place.
    """

    def __init__(self, db_name):
        """Initialise queues, MongoDB connections, logging and indexes.

        :param db_name: key into DATABASE selecting the parameter database.
        """
        super(guangdongshengshuiwujuW, self).__init__()
        self.headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36",
        }
        self.params_queue = Queue(maxsize=10)   # list-page parameter queue
        self.detail_queue = Queue(maxsize=100)  # detail-document queue
        self.index_status = True                # True until indexes have been created
        self.screen_queue = Queue(maxsize=100)  # URL queue, used for screenshots
        self.local = threading.local()          # per-thread bulk-write buffers
        self.db_m = MongoDBer(DATABASE[db_name])      # MongoDB connection (params)
        self.db_m1 = MongoDBer(DATABASE['guxulong'])  # MongoDB connection (detail data)
        self.guangdongshengshuiwuju_param = self.db_m["guangdongshengshuiwuju_param"]
        self.guangdongshengshuiwuju_param_err = self.db_m["guangdongshengshuiwuju_param_err"]
        self.guangdongshengshuiwuju = self.db_m1["xinyongpingjia_data_detail_all"]
        # Candidate xpaths locating the main text table on a detail page.
        self.xpaths = ["//table[@class='zdaj']"]
        # Logger
        self.m = MyLogging(self.__class__.__name__, debug_level=logging.INFO, write_level=logging.INFO).logging
        self.create_indexes()  # create collection indexes once
        self.myself_ip = self.get_myself_ip()  # local machine IP (from BaseSpider)

    def get_all_number_page(self, res, param_result):
        """Return the total page count parsed from the "fenye" paginator div.

        Falls back to 1 when the paginator is absent from the response.
        """
        tree = etree.HTML(res)
        totals = tree.xpath('//div[@class="fenye"]/text()')
        if totals:
            # Paginator text looks like "current/total页".
            all_number_page = totals[0].split('/')[1].replace('页', '')
        else:
            all_number_page = 1
        return int(all_number_page)

    def parse_data(self, param_res, param_result):
        """Parse one list page into parallel column lists.

        :returns: (titles, originalurl ids, publish dates, day_end_flag,
                   issuing-unit list).  day_end_flag is the incremental-crawl
                   marker and is always True here.
        """
        day_end_flag = True  # incremental-crawl flag
        title_list = []
        originalurl_list = []
        publishdate_list = []
        ifbunit_list = []
        param_res_xpath = etree.HTML(param_res)
        for row in param_res_xpath.xpath('//table[@id="zdss_tb"]/tr'):
            # The row link carries the manuscript id inside an onclick handler:
            # queryIllegalDetail('<id>')
            onclick = row.xpath('./td[2]/a/@onclick')
            if onclick:
                originalurl_id = str(onclick[0]).replace("queryIllegalDetail('", '').replace("')", '')
            else:
                originalurl_id = ''
            originalurl_list.append(originalurl_id)

            title_nodes = row.xpath('./td[2]/a/text()')
            title = title_nodes[0].strip() if title_nodes else ''
            print(title)
            title_list.append(title)

            unit_nodes = row.xpath('./td[3]/text()')
            if unit_nodes:
                company_bianhao = unit_nodes[0].strip().replace('发布人：', '')
            else:
                company_bianhao = ''
            ifbunit_list.append(company_bianhao)

            date_nodes = row.xpath('./td[5]/text()')
            if date_nodes:
                publishdate_1 = date_nodes[0].strip().replace('发布日期', '').replace('年', '-').replace('月', '-').replace('日', '')
            else:
                publishdate_1 = '1970-01-01'
            try:
                parsed_date = datetime.strptime(publishdate_1, "%Y-%m-%d")
            except ValueError:
                # Unparseable date -> epoch sentinel, stored as '' below.
                parsed_date = datetime.strptime('1970-01-01', "%Y-%m-%d")
            if parsed_date == datetime.strptime('1970-01-01', "%Y-%m-%d"):
                publishdate = ''
            else:
                publishdate = parsed_date
            publishdate_list.append(publishdate)
        return title_list, originalurl_list, publishdate_list, day_end_flag, ifbunit_list

    def remove_js_style(self, response):
        """Strip <script>/<noscript> and <style>/<nostyle> nodes from HTML.

        :returns: cleaned HTML string, or False when parsing fails.
        """
        try:
            encoding_ = 'utf-8'
            tree = etree.HTML(response)
            for e in tree.xpath("//script | //noscript"):
                e.getparent().remove(e)
            for e2 in tree.xpath('//style | //nostyle'):
                e2.getparent().remove(e2)
            return html.tostring(tree, encoding=encoding_).decode(encoding_)
        except Exception as err:
            print('remove_js_style报错是%s ' % err)
            return False

    def create_indexes(self):
        """Create the detail-collection indexes once per process.

        Existing indexes are detected by substring match on the index name so
        re-runs do not attempt duplicate creation.
        """
        if self.index_status:
            temp_url_index = True
            temp_status_index = True
            update_time_index = True
            for index in self.guangdongshengshuiwuju.list_indexes():
                if "originalurl" in index["name"]:
                    temp_url_index = False
                    continue
                if "status" in index["name"]:
                    temp_status_index = False
                    continue
                if "update_time" in index["name"]:
                    update_time_index = False

            if temp_url_index:
                # Compound unique key: one document per (manuscript id, body hash).
                self.guangdongshengshuiwuju.create_index([("originalurl_id", 1), ("html_id", 1)], unique=True, background=True)
            if temp_status_index:
                self.guangdongshengshuiwuju.create_index([("status", 1), ("sourceplatform", 1)], background=True)
            if update_time_index:
                self.guangdongshengshuiwuju.create_index([("utime", 1)], background=True)

            self.index_status = False

    def md5_url(self, url):
        """Return the hex md5 digest of *url*."""
        m = md5()
        m.update(url.encode())
        return m.hexdigest()

    def save_data(self, title_list, originalurl_list, publishdate_list, page, param_result, ifbunit_list):
        """Queue every parsed row with a non-empty id for bulk upsert."""
        for title, originalurl, publishdate, ifbunit in zip(
                title_list, originalurl_list, publishdate_list, ifbunit_list):
            ctime = datetime.now()
            if originalurl != '':
                self.insert_data('广东省', '', '', title, publishdate, '国家税务总局广东省税务局', originalurl, ctime)
            else:
                print('无')

    def insert_data(self, province, city, county, title, publishdate, sourceplatform, originalurl, ctime):
        """Buffer one UpdateOne upsert; flush to MongoDB every 100 documents.

        The remainder (< 100) is flushed by get_menu_producer_consumer once
        the crawl loop ends.
        """
        self.local.insert_data_list.append(UpdateOne(
            {"originalurl_id": originalurl}, {
                "$set": {
                    "province": province, "city": city, "county": county, "title": title_strip(title),
                    "publishdate": publishdate,
                    "sourceplatform": sourceplatform,
                    "originalurl_id": originalurl,
                    "ctime": ctime,
                    "status": 0,
                    'behavior_type': '',
                    "utime": ctime
                }
            }, upsert=True
        ))

        if len(self.local.insert_data_list) >= 100:
            try:
                self.guangdongshengshuiwuju.bulk_write(self.local.insert_data_list)
            except Exception as e:
                # Format the exception into the message itself; passing it as a
                # second positional arg broke logging's lazy %-formatting.
                self.m.error("插入失败%s条数据, err: %s" % (len(self.local.insert_data_list), e))
            else:
                self.m.info("插入成功%s条数据" % len(self.local.insert_data_list))
            finally:
                self.local.insert_data_list.clear()

    def parse_xunhuan(self, param_result):
        """Page through the list endpoint, parsing and saving each page."""
        # Resume logic: restart from page 1 when day_flag is set or when the
        # last full crawl started more than 2 days ago.
        if not param_result.get('day_flag'):
            if (datetime.now() - param_result.get('all_start_time', datetime.now() - timedelta(days=2))).days > 2:
                self.guangdongshengshuiwuju_param.update_one({'_id': param_result['_id']},
                                             {'$set': {'all_start_time': datetime.now()}})
                page = 1
            else:
                page = param_result.get('page', 1)
        else:
            page = 1
        while True:
            headers = {
                "Accept": "text/html, */*; q=0.01",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "Connection": "keep-alive",
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                "$Cookie": "fjbz=fan; yfx_c_g_u_id_10003721=_ck23091215142819078291530724513; yfx_f_l_v_t_10003721=f_t_1694502868896__r_t_1694502868896__v_t_1694502868896__r_c_0; yfx_c_g_u_id_10003713=_ck23091215482713775297647243007; yfx_f_l_v_t_10003713=f_t_1694504907370__r_t_1694504907370__v_t_1694504907370__r_c_0; yfx_c_g_u_id_10003710=_ck23091215493617871711794301271; yfx_f_l_v_t_10003710=f_t_1694504976777__r_t_1694504976777__v_t_1694504976777__r_c_0; yfx_c_g_u_id_10000447=_ck23091215592318475180134561103; yfx_f_l_v_t_10000447=f_t_1694505563835__r_t_1694505563835__v_t_1694505563835__r_c_0; yfx_c_g_u_id_10003704=_ck23091216190615929231810928172; yfx_f_l_v_t_10003704=f_t_1694506746582__r_t_1694506746582__v_t_1694506746582__r_c_0; yfx_c_g_u_id_10003746=_ck23091217194916411671712134997; yfx_f_l_v_t_10003746=f_t_1694510389636__r_t_1694510389632__v_t_1694510389632__r_c_0; yfx_c_g_u_id_10003718=_ck23091217231610143025793176120; yfx_f_l_v_t_10003718=f_t_1694510595981__r_t_1694510595981__v_t_1694510595981__r_c_0; yfx_c_g_u_id_10003701=_ck23091217282818927413761630335; yfx_f_l_v_t_10003701=f_t_1694510908857__r_t_1694510908857__v_t_1694510908857__r_c_0; acw_tc=2f6a1fe616945941754516821efcbea89e76d7d5d2e52b198b005bf1e1bdda; route=07712e9968335208630db412b6f9f229; JSESSIONID=2HaNrJBW45uiGSUIpORDRVowHDU3DkEfiS8u3KzDiTs0_4mi6ji5\\u0021-1915158918; yfx_c_g_u_id_10003702=_ck23091316361615298712134470158; yfx_f_l_v_t_10003702=f_t_1694594176520__r_t_1694594176520__v_t_1694594176520__r_c_0; Hm_lvt_a83fd5cd47f51f4a9b53ddc2b0b56cde=1694224700,1694594177; Hm_lpvt_a83fd5cd47f51f4a9b53ddc2b0b56cde=1694594177; SERVERID=86cdeeb042bfa50e4e2f6cfb3d39739e|1694594421|1694594175",
                "Origin": "http://guangdong.chinatax.gov.cn",
                "Referer": "http://guangdong.chinatax.gov.cn/siteapps/webpage/gdtax/zdsswfaj/index.jsp",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
                "X-Requested-With": "XMLHttpRequest"
            }
            url = "http://guangdong.chinatax.gov.cn/siteapps/webpage/gdtax/zdsswfaj/query.jsp"
            data = {
                "pageSize": "10",
                "pageNo": str(page),
                "channelId": "b28dcb23c0a3496ca04ad1e39707d31e"
            }
            time.sleep(1)  # throttle requests
            param_res = requests.post(url, headers=headers, data=data, verify=False).content.decode('utf-8')
            # NOTE(review): total page count is hard-coded instead of being
            # parsed via get_all_number_page -- confirm it is still current.
            all_number_page = 1180
            print('总页数是%s' % all_number_page)
            # Stop once past the last page.
            if page > int(all_number_page):
                print('抓取结束')
                break
            if param_res:
                title_list, originalurl_list, publishdate_list, day_end_flag, ifbunit_list = self.parse_data(
                    param_res, param_result)
                self.save_data(title_list, originalurl_list, publishdate_list, page, param_result, ifbunit_list)
                self.m.info('第%s页获取数据成功' % (str(page)))
            else:
                print('没有数据')

            page = page + 1
        self.m.info('获取数据完毕')

    def get_menu_producer_consumer(self):
        """Crawl all list pages, then flush the trailing upsert batch."""
        self.local.insert_data_list = []  # bulk-insert buffer
        param_result = {}
        self.parse_xunhuan(param_result)
        # Flush the leftover batch (< 100 docs); previously these rows were
        # silently dropped when the crawl loop ended.
        if self.local.insert_data_list:
            try:
                self.guangdongshengshuiwuju.bulk_write(self.local.insert_data_list)
            except Exception as e:
                self.m.error("插入失败%s条数据, err: %s" % (len(self.local.insert_data_list), e))
            else:
                self.m.info("插入成功%s条数据" % len(self.local.insert_data_list))
            finally:
                self.local.insert_data_list.clear()

    def judge_xpath_err(self, detail_result):
        """Return (image_status, xpath_err) for a matched xpath; both 0 here."""
        image_status = 0
        xpath_err = 0
        return image_status, xpath_err

    def get_all_text(self, res1, test_xpath):
        """Extract all CJK characters under *test_xpath* and hash them.

        :returns: (sha256 id, text) when more than 20 CJK characters were
                  found, else (None, '').
        """
        rules1 = re.compile("[\u4e00-\u9fa5]")
        res_html = etree.HTML(res1)
        res_text = "".join(res_html.xpath(test_xpath))
        if res_text:
            all_text = "".join(rules1.findall(res_text))
            if all_text and len(all_text) > 20:
                html_id = sha256_all_text(all_text)
                return html_id, all_text
        return None, ''

    def sha256_all_text(self, all_text):
        """Return the hex sha256 digest of *all_text*."""
        sha = hashlib.sha256()
        sha.update(all_text.encode())
        return sha.hexdigest()

    def judge_xpath(self, res, xpaths):
        """Return the first xpath in *xpaths* that matches *res*, else ''."""
        for xpath in xpaths:
            temp = parse_xpath(res, xpath)
            if temp:
                return xpath
        return ""

    def get_detail_consumer(self):
        """Consume detail_queue: fetch each detail page and update the html,
        detail_text and status fields.  A falsy item is the stop sentinel.
        """
        self.local.get_detail_consumer_list = []
        while True:
            detail_result = self.detail_queue.get()
            if not detail_result:
                # Sentinel received: flush the remaining buffer.  Guard against
                # an empty list -- pymongo's bulk_write raises InvalidOperation
                # when given no requests, which used to log a spurious error.
                if self.local.get_detail_consumer_list:
                    try:
                        self.guangdongshengshuiwuju.bulk_write(self.local.get_detail_consumer_list)
                    except Exception as e:
                        self.m.error("更新失败%s" % e)
                    else:
                        self.m.debug("更新成功%s条" % len(self.local.get_detail_consumer_list))
                self.m.info("get_detail_consumer结束")
                break

            headers = {
                "Accept": "text/html, */*; q=0.01",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "Connection": "keep-alive",
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                "Origin": "http://guangdong.chinatax.gov.cn",
                "Referer": "http://guangdong.chinatax.gov.cn/siteapps/webpage/gdtax/zdsswfaj/index.jsp",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
                "X-Requested-With": "XMLHttpRequest"
            }
            url = "http://guangdong.chinatax.gov.cn/siteapps/webpage/gdtax/zdsswfaj/service.jsp"
            data = {
                "manuscriptId": detail_result['originalurl_id']
            }
            detail_res_ = requests.post(url, headers=headers, data=data, verify=False).content.decode('utf-8')
            if detail_res_:
                xpath = judge_xpath(detail_res_, self.xpaths)
                if xpath != '':
                    image_status, xpath_err = self.judge_xpath_err(detail_result)
                else:
                    image_status, xpath_err = 2, 1

                html_id, detail_text = self.get_all_text(detail_res_, xpath + "//text()")
                # %s formatting tolerates html_id being None; the previous
                # string concatenation raised TypeError for short/empty bodies.
                print('html_id是%s' % html_id)
                if html_id:
                    # Skip documents whose (id, body hash) already exists.
                    detail_res = self.guangdongshengshuiwuju.find_one(
                        {"originalurl_id": detail_result['originalurl_id'], "html_id": html_id})
                    if not detail_res:
                        self.local.get_detail_consumer_list.append(UpdateOne(
                            {"_id": detail_result["_id"]},
                            {"$set": {
                                "html": clear_html(detail_res_),
                                "html_id": html_id,
                                "detail_text": detail_text,
                                "utime": datetime.now(),
                                "text_xpath": xpath,
                                "behavior_type": '行政处罚',
                                "xpath_err": 0,
                                "status": 2,
                            }}
                        ))
                    else:
                        self.guangdongshengshuiwuju.update_one({"_id": detail_result["_id"]
                                                }, {"$set": {
                            "status": 2,
                            "err_txt": "该数据为重复数据，无需上传截图",
                            "xpath_err": 1,
                            "html": "",
                        }})
            else:
                self.guangdongshengshuiwuju.update_one({"_id": detail_result["_id"]}, {"$set": {
                    "status": 2,
                    "html": "",
                    "err_txt": "正文部分为空",
                    "xpath_err": 1,
                }})
            if len(self.local.get_detail_consumer_list) >= 1:
                try:
                    self.guangdongshengshuiwuju.bulk_write(self.local.get_detail_consumer_list)
                except Exception as e:
                    # Exception folded into the message; a second positional
                    # arg broke logging's lazy %-formatting.
                    self.m.error("更新失败%s, err: %s" % (len(self.local.get_detail_consumer_list), e))
                else:
                    self.m.info("更新成功%s" % len(self.local.get_detail_consumer_list))
                finally:
                    self.local.get_detail_consumer_list.clear()

    def add_detail_to_queue(self, flag=False):
        """Producer: claim pending documents (status 0 -> 1) and queue them.

        :param flag: when True, stop after a single document (debug mode).
        """
        # Reset documents stuck in the in-progress state from a previous run.
        filter_ = {'status': 1}
        update_ = {'$set': {'status': 0}}
        self.guangdongshengshuiwuju.update_many(filter_, update_)
        i = 0
        filter_ = {'status': 0, 'sourceplatform': '国家税务总局广东省税务局'}
        update_ = {'$set': {'status': 1, 'status_time': datetime.now()}}
        proj = {}
        while True:
            try:
                data = self.guangdongshengshuiwuju.find_one_and_update(filter_, update_, proj, return_document=ReturnDocument.AFTER)
                if not data:
                    self.m.info("%s:::获取标书详情的生产者结束", datetime.now())
                    break
                self.detail_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书详情从库中读取数据err info:%s' % (datetime.now(), err))
                i += 1
                time.sleep(3)
                if i >= 10:  # give up after 10 consecutive DB errors
                    break
        # One sentinel per consumer thread.
        for i in range(self.thread_num):
            self.detail_queue.put(None)

    def add_menu_producer_queue(self, flag=False):
        """Producer: claim list-page parameter documents and queue them.

        :param flag: when True, stop after a single document (debug mode).
        """
        filter = {'status': {"$ne": 0}}
        update = {'$set': {'status': 0}}
        self.guangdongshengshuiwuju_param.update_many(filter, update)
        filter_ = {'status': 0}
        update_ = {'$set': {'status': 1}}
        proj = {}
        i = 0
        while True:
            try:
                data = self.guangdongshengshuiwuju_param.find_one_and_update(filter_, update_, proj,
                                                             return_document=ReturnDocument.AFTER)
                if not data:
                    self.m.info("%s:::获取标书目录的生产者结束", datetime.now())
                    break
                self.params_queue.put(data)
                if flag:
                    break
            except Exception as err:
                self.m.error('%s:::获取标书目录从库中读取数据err info:%s' % (datetime.now(), err))
                i += 1
                time.sleep(1)
                if i >= 10:  # give up after 10 consecutive DB errors
                    break
        # One sentinel per consumer thread.
        for i in range(self.thread_num):
            self.params_queue.put(None)

    def run_thread_list(self):
        """Crawl the list pages, then register producer/consumer threads for
        the detail stage (started by BaseSpider.run)."""
        self.get_menu_producer_consumer()
        self.thread_name_list = [
            self.add_detail_to_queue,  # detail producer
        ]
        self.more_thread_name_list = [
            self.get_detail_consumer,  # detail consumer
        ]



if __name__ == '__main__':
    db_name = 'liuzilong'
    # Use a distinct variable name so the class itself is not shadowed by
    # its instance.
    spider = guangdongshengshuiwujuW(db_name)
    spider.proxy_flag = False  # crawl with the local IP, no proxy
    spider.run()