# -*- coding: utf-8 -*-
# @ 来源平台：   河南省公共资源交易中心
# @ 来源网址：   http://www.hnggzy.com/hnsggzy/      # http://www.hnggzy.com/jyxx/002001/transaction_notice.html
# @ 时间：  2022/1/15 10:33
# Tool ：PyCharm
# @ author: LZL
# 目录：   lzl_henan_menu_jl
# 数据：   lzl_zx_data_hns
# @ 网站

import datetime
import hashlib
import json
import os
import re
import sys
import time


from datetime import datetime, timedelta
from parsel import Selector
from queue import Queue
from pymongo import ReturnDocument, UpdateOne
from lxml import etree
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, BASE_DIR)
from base_spider import BaseSpider
from conf.dber import MongoDBer
from conf.conn_pool import get_connection
from conf.database import DATABASE
from conf.Pic_Use import get_host_ip,path_file, pic_use

from conf import conf_util

class SpiderMenu(BaseSpider):
    """Crawler for the Henan Province public resource trading centre
    (http://www.hnggzy.com/hnsggzy/).

    Producer/consumer threads harvest announcement list rows into the
    ``lzl_henan_menu_jl`` collection, detail pages into ``lzl_zx_data_hns``,
    and track per-category crawl state in ``lzl_page_jl``.
    """
    def __init__(self,thread_num=4):
        """Initialise DB handles, work queues and site-specific constants.

        :param thread_num: number of consumer threads per worker pool.
        """
        super().__init__(thread_num)
        self.host_ip = get_host_ip()  # IP of the machine running this spider
        self.db_m = MongoDBer(DATABASE['liuzilong'])  # MongoDB connection object
        self.sourceplatform = '河南省公共资源交易中心'
        self.url = 'https://midd.jianshequan.com'
        self.Authorization = 'Bearer kasjdflqjkeofawejifjfglewjemrgopwekrgopwerjkgopqwerjksgogkp'
        self.menu = self.db_m.lzl_henan_menu_jl  # announcement list (menu) collection
        self.detail_data_col = self.db_m.lzl_zx_data_hns  # detail-page data collection
        self.page_log_col = self.db_m.lzl_page_jl  # per-category crawl progress / filter records
        self.original_website_id = 12
        self.detail_queue = Queue(maxsize=40)  # menu rows awaiting a detail-page fetch
        # Category list-page work queue.
        self.page_queue = Queue()
        self.screen_queue = Queue()  # rows awaiting a page screenshot
        # Site-specific parameters below.

        self.title_xpath = "//div[@class='text detail-list']"
        self.Ttitle_xpath = '//div[@class="title-text"]'
        self.province = '河南省'
        self.detail_producer_flag = True
        # categorynum -> [industry, announcement-stage label].
        self.pd_if = {'002001001': ['工程建设', '招标公告'], '002001002': ['工程建设', '变更公告'], '002001003': ['工程建设', '评标结果公示'],
                      '002001004': ['工程建设', '文件预公示'], '002001005': ['工程建设', '异常公告'], '002001006': ['工程建设', '中标结果公告'],
                      '002002001': ['政府采购', '采购公告'], '002002002': ['政府采购', '变更公告'], '002002003': ['政府采购', '结果公示'],
                      '002002004': ['政府采购', '其他公告'],
                      '002003001': ['产权交易', '交易公告'], '002003002': ['产权交易', '通知公告'], '002003005': ['产权交易', '成交公示'],
                      '002003007': ['产权交易', '其他信息'],
                      '002004001': ['矿权交易', '出让公告'], '002004005': ['矿权交易', '交易预公告'], '002004003': ['矿权交易', '变更公告'],
                      '002004004': ['矿权交易', '结果公示'],
                      '002004002': ['矿权交易', '转让公告'],
                      '002005001': ['医药采购', '采购公告'], '002005002': ['医药采购', '变更公告'], '002005003': ['医药采购', '结果公示'],
                      '002006001': ['ppp+', '政策法规'], '002006002': ['ppp+', '项目公告'], '002006003': ['ppp+', '结果公示'],
                      '002006004': ['ppp+', '项目库']}
        # Categories whose detail pages are screenshotted.
        self.is_pic = ['002001003', '002001006', '002002003']
        # Categories classified as award announcements ("中标公告").
        self.if_zhongbiao = ['002001003', '002001006', '002002003', '002003005', '002004004', '002005003', '002006003']
        # Top-level category prefixes enabled for crawling.
        self.DATARUN = {
            '工程建设': '002001',
            '政府采购': '002002',
             # '产权交易': '002003',
            '矿权交易': '002004',
            '医药采购': '002005',
            'ppp+': '002006',
        }
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36'}

    def get_two_title(self, choice, item):
        # 获取二级标题
        TwoLvTitle = ''.join(choice.xpath(f"{self.Ttitle_xpath}//text()").getall())
        if TwoLvTitle:
            item['TwoLvTitle'] = TwoLvTitle
            return True
        TwoLvTitle = choice.xpath(f'//div[@class="template-title"]/text()').get()
        if TwoLvTitle:
            item['TwoLvTitle'] = TwoLvTitle
            return True

    def get_all_text(self, res1, test_xpath, title, publishdate):
        """Hash the Chinese body text selected by *test_xpath* from *res1*.

        Only CJK characters are kept so that markup/whitespace churn does not
        change the fingerprint.

        :param res1: raw HTML string of the detail page.
        :param test_xpath: xpath returning the body text nodes.
        :param title: announcement title (must be non-empty).
        :param publishdate: publish date (must be non-empty).
        :return: sha256 hex digest of the CJK text, or None when the body is
            missing/too short or title/publishdate are empty strings.
        """
        doc = etree.HTML(res1)
        joined = "".join(doc.xpath(test_xpath))
        if not joined:
            return None
        # Keep only CJK unified ideographs (U+4E00..U+9FA5).
        cjk_only = "".join(re.findall("[\u4e00-\u9fa5]", joined))
        if len(cjk_only) > 20 and title != '' and publishdate != '':
            return self.sha256_all_text(cjk_only)
        return None

    def sha256_all_text(self,all_text):
        """
        sha256加密alltext
        """
        sha = hashlib.sha256()
        sha.update(all_text.encode())
        return sha.hexdigest()

    def get_html_id_xpath(self, html,title,publishdate):
        ''' 获取html_id和正文xpath '''
        html_id = self.get_all_text(html,'//*[@class="text detail-list"]//text()',title,publishdate)
        if html_id:
            return {'status': 200, 'html_id': html_id, "text_xpath": '//*[@class="text detail-list"]', "html": html}
        html_id = self.get_all_text(html,"//*[@id='TDContent']//text()",title,publishdate)
        if self.get_all_text(html,"//*[@id='TDContent']//text()",title,publishdate):
            return {'status': 200, 'html_id': html_id, "text_xpath": "//*[@id='TDContent']", "html": html}
        return {'status': 404, 'html_id': '', 'text_xpath': ''}

    def get_all_type(self):
        """Scrape the top-level category list and upsert it into page_log_col."""
        type_url = 'http://www.hnggzy.com/jyxx/002001/transaction_notice.html'
        page = etree.HTML(self.send_rquest_get(type_url))
        for li in page.xpath('.//div[@class="bdl clearfix"]/ul/li'):
            anchor = li.xpath('./a')[0]
            # The <li> class attribute carries the category id as "about_<id>".
            category_id = li.xpath('./@class')[0].replace('about_', '')
            record = {
                'industry': anchor.xpath('./@title')[0],
                'sourceplatform': self.sourceplatform,
                'id': category_id,
                'link': 'http://www.hnggzy.com' + anchor.xpath('./@href')[0],
            }
            # Upsert keyed on the category id so reruns refresh, not duplicate.
            self.page_log_col.update_one({'id': category_id}, {'$set': record}, upsert=True)


    def get_all_page_num(self, one_res_dict):
        """ 获取总页码数 """
        all_count = one_res_dict.get('custom', {}).get('count', 1)
        all_page, remainder = divmod(all_count, 8)
        if not remainder:
            all_page -= 1  # 因为页码是从0开始
        return all_page, all_count

    def parse_one_page_data(self, one_res_dict, one_data_dict):
        """Parse one page of list results and insert new rows into self.menu.

        :param one_res_dict: decoded JSON from the list endpoint.
        :param one_data_dict: the category record driving this crawl
            (supplies 'industry' and the incremental 'day_flag').
        :return: True when incremental crawling hit data older than two days
            (signals the caller to stop paging), otherwise False.
        """
        # buff_list = []
        day_end_flag = False
        for one_data in one_res_dict.get('custom', {}).get('infodata', []):
            # NOTE(review): assumes 'infourl' is site-relative; an absolute
            # https URL would leave 'originalurl' unset here (KeyError below)
            # — confirm the feed's URL format.
            if 'https' not in one_data['infourl']:
                one_data['originalurl'] = 'http://www.hnggzy.com' + one_data['infourl']
            pd = self.menu.find_one({'originalurl': one_data['originalurl']})
            try:
                one_data['publishdate'] = datetime.strptime(one_data['startdate'], '%Y-%m-%d %H:%M:%S')
            except:
                # Fall back to the date-only field when startdate is missing
                # or not in full timestamp format.
                one_data['publishdate'] = datetime.strptime(one_data["infodate"], '%Y-%m-%d')
            if one_data_dict.get('day_flag'):
                # Incremental mode: stop once rows get older than two days.
                if one_data['publishdate'] < datetime.now() - timedelta(days=2):
                    day_end_flag = True
                    break
            if pd:
                continue  # already stored — skip duplicates
            # Coloured <font> tags inside customtitle carry channel/industry labels.
            channelname = Selector(one_data['customtitle']).xpath("//font[@color='#CC00FF']/text()").get()
            industryv2 = Selector(one_data['customtitle']).xpath("//font[@color='#FF6600']/text()").get()
            if not channelname:
                one_data['channelname'] = ''
            else:
                one_data['channelname'] = channelname.replace('[', '').replace(']', '')
            if not industryv2:
                one_data['industryv2'] = ''
            else:
                one_data['industryv2'] = industryv2.replace('[', '').replace(']', '')
                # Construction ("施工") / supervision ("监理") labels are also
                # merged into channelname.
                if ('施工' in industryv2) or ('监理' in industryv2):
                    if not one_data['channelname']:
                        one_data['channelname'] = one_data['industryv2']
                    else:
                        one_data['channelname'] = ','.join([one_data['channelname'], one_data['industryv2']])
            one_data['city'] = ''
            one_data['tenderaddress'] = '河南省'
            one_data['industry'] = one_data_dict['industry']
            # Map the category number to its announcement-stage label.
            one_data['ifbprogresstag'] = self.pd_if[one_data['categorynum']][1]
            if one_data['categorynum'] in self.if_zhongbiao:
                one_data['ifbprogress'] = '中标公告'
                # Award announcements in screenshot-enabled categories get
                # queued for a page screenshot (is_pic=1).
                if one_data['categorynum'] in self.is_pic:
                    one_data['is_pic'] = 1
                else:
                    one_data['is_pic'] = 0
            else:
                one_data['ifbprogress'] = '招标公告'
                one_data['is_pic'] = 0
            one_data['province'] = self.province
            one_data['sourceplatform'] = self.sourceplatform
            one_data['is_html'] = 0
            one_data['version_num'] = 2
            one_data['cz'] = 0
            one_data['IP'] = self.host_ip
            one_data['ctime'] = datetime.now()
            one_data['utime'] = datetime.now()
            # Dedup relies on the find_one above; insert_one is safe because
            # duplicates were skipped via `continue`.
            self.menu.insert_one(one_data)
            # buff_list.append(UpdateOne({'originalurl': one_data['originalurl']}, {'$set': one_data}, upsert=True))
        # if buff_list:
        #     self.menu.bulk_write(buff_list)
        return day_end_flag

    # --- Listing (menu) crawl logic and parsing ---
    def get_one_type_list(self, one_data):
        """Crawl all list pages for one category filter (pages start at 0).

        Full-crawl mode resumes from the checkpointed page and forces a
        restart from page 0 when the last full pass began over 10 days ago;
        incremental mode (``day_flag``) always starts at page 0 and stops when
        parse_one_page_data reports rows older than the cutoff.

        :param one_data: category record from page_log_col.
        """
        url = 'http://www.hnggzy.com/EpointWebBuilder/rest/frontAppCustomAction/getPageInfoListNew'
        industry = one_data['industry']
        if not one_data.get('day_flag'):
            # Full crawl: restart when the last pass is older than 10 days,
            # otherwise resume from the saved page checkpoint.
            if (datetime.now() - one_data.get('all_start_time', datetime.now() - timedelta(days=11))).days > 10:
                self.page_log_col.update_one({'_id': one_data['_id']}, {'$set': {'all_start_time': datetime.now()}})
                page = 0
            else:
                page = one_data.get('page', 0)
        else:
            page = 0  # incremental: the newest rows live on the first page
        while True:
            print('%s::: 类型: %s 类型ID: %s 开始第 %s 页数据爬取' % (datetime.now(), industry, one_data['id'], page))
            try:
                form_ = {
                    "siteGuid":"7eb5f7f1-9041-43ad-8e13-8fcb82ea831a",
                    "categoryNum":one_data['id'],
                    "kw":"","startDate":"","endDate":"",
                    "pageIndex": page, "pageSize":8, "jytype":""
                }
                form_data = {'params': json.dumps(form_)}
                res_str = self.send_rquest_post(url, data=form_data, headers=self.headers)
                if not res_str:
                    # Routed to the except handler below, which checkpoints
                    # the current page before returning. (Fixed: removed the
                    # unreachable `break` that followed this raise.)
                    raise Exception('该页状态码异常')
                one_res_dict = json.loads(res_str)
                all_page, all_count = self.get_all_page_num(one_res_dict)
                if page >= all_page:
                    # Last page done: mark the category finished and switch
                    # it to incremental mode for future runs.
                    self.page_log_col.update_one({'_id': one_data['_id']}, {'$set': {'page': page, 'day_flag': True, 'all_start_time': datetime.now()}})
                    break
                end_flag = self.parse_one_page_data(one_res_dict, one_data)  # True => incremental cutoff reached
                if end_flag:
                    self.page_log_col.update_one({'_id': one_data['_id']}, {'$set': {'day_flag': True, 'all_start_time': datetime.now()}})
                    break
                page += 1
            except Exception as err:
                # Checkpoint the failing page so the next run resumes here.
                self.page_log_col.update_one({'_id': one_data['_id']},{'$set':{'page': page}})
                print('%s::类型: %s 第 %s 页码采集异常结束 err info::%s' %(datetime.now(), industry, page, err))
                return

    def get_page_list_product(self, flag=False):
        ''' 各个筛选项添加到队列 '''
        for one_data in self.page_log_col.find({'sourceplatform': self.sourceplatform}):
            self.page_queue.put(one_data)
            if flag:
                break
        for i in range(self.thread_num):
            self.page_queue.put(None)

    def get_page_list_consumer(self):
        ''' 获取各个筛选项目录信息 '''
        while True:
            if not self.page_queue.qsize():
                print('筛选项队列为空,休息5s')
                time.sleep(5)
                continue
            one_data = self.page_queue.get()
            if not one_data:
                break
            self.get_one_type_list(one_data)

    # @retry(pymongo.errors.NetworkTimeout, tries=3, delay=3)
    def get_detail_product(self, flag=False):
        """Producer: stream un-fetched menu rows into detail_queue.

        Rows claimed in a previous run (is_html=1) but never completed
        (cz != 1) are first reset to is_html=0 so they are picked up again.

        :param flag: debug switch — stop after queueing a single row.
        """
        i = 0
        # Recover rows claimed but never finished by an earlier run.
        filter_ = {'is_html': 1, 'cz': {'$ne': 1}}
        update = {'$set': {'is_html':0}}
        self.menu.update_many(filter_, update)
        print('%s:::获取标书详情的生产者线程开启' % datetime.now())
        filter_ = {'is_html': 0, 'cz': 0}
        update_ = {'$set': {'is_html': 1}}
        proj = {}
        while True:
            try:
                # Atomically claim one row so concurrent producers cannot hand
                # out the same document twice.
                data = self.menu.find_one_and_update(filter_, update_, proj, return_document=ReturnDocument.AFTER)
                if not data:
                    # Fixed: the original passed datetime.now() as a second
                    # print() argument instead of %-formatting the message.
                    print('%s:::获取标书详情的生产者结束' % datetime.now())
                    break
                self.detail_queue.put(data)
                if flag:
                    break
            except Exception as err:
                print('%s:::获取标书详情从库中读取数据err info:%s' % (datetime.now(), err))
                i += 1
                time.sleep(3)
                if i >= 10:
                    break  # give up after 10 consecutive DB errors
        # One sentinel per consumer thread signals shutdown.
        for i in range(self.thread_num):
            self.detail_queue.put(None)

    # @retry(pymongo.errors.AutoReconnect, tries=3, delay=3)
    def get_one_detail(self, item):
        """ 获取一个标书详情的处理方式 """
        item["weather_have_iframe"] = 0
        item["weather_have_image"] = 0
        item["weather_have_pdf"] = 0
        item["weather_have_pdf_type2"] = 0
        item['weather_have_enclosure'] = 0  # 是否有附件
        item["url_type"] = 'html'
        item["original_website_id"] = self.original_website_id
        item["weather_have_blank_url"] = 0  #
        filter_ = {'originalurl':  item['originalurl']}
        if 'anncId' in item['originalurl']:
            self.menu.update_one(filter_, {'$set': {'cz': 10}})
            return
        filter_ = {'originalurl':  item['originalurl']}
        one = self.detail_data_col.find_one(filter_, {'_id': 1})
        if one:
            print('%s:::数据已存在' % datetime.now())
            print(item['originalurl'])
            self.menu.update_one(filter_, {'$set': {'cz': 1, 'response_status_code': 200}})
            return
        # 获取详情页数据
        op = self.get_one_html(item)
        if not op:
            return  # 数据请求异常的情况直接结束返回
        item['is_html'] = 0   # 解析的时候初始状态为零
        item['is_parse_html'] = 0
        item['utime'] = datetime.now()
        self.detail_data_col.insert_one(item)
        self.menu.update_one(filter_, {'$set': {'cz': 1, 'response_status_code': 200}})
        time.sleep(2)

    def get_one_html(self, item):
        """Download one detail page, parse it and enrich *item* in place.

        Records the HTTP outcome on the menu row, extracts the secondary
        title and the body hash/xpath, and flags images/iframes in the body.

        :param item: menu row dict; on success gains html, html_id,
            text_xpath, xpath_err and the weather_have_* flags.
        :return: True on success, False on any request/parse failure.
        """
        url = item['originalurl']
        print('%s:::获取详情的URL：%s 开始获取' % (datetime.now(),url))
        header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36',
                  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',

                  }
        try:
            # NOTE(review): send_rquest_get is the (misspelled) BaseSpider
            # helper; it appears to return an int status code on HTTP errors.
            response = self.send_rquest_get(url=url, headers=header)
        except:
            response =''
        filter_ = {'originalurl':  item['originalurl']}
        if type(response)==int:
            # Non-200 HTTP status: record it and mark the row cz=4 (failed).
            self.menu.update_one(filter_, {'$set': {'response_status_code': response, "cz": 4}})
            return False
        elif response:
            print('解析详情')
            choice = Selector(response)
            two_lvtitle = self.get_two_title(choice, item)  # parse the secondary title
            if not two_lvtitle:
                return False
            self.menu.update_one(filter_, {'$set': {'response_status_code': 200}})
        else:
            # Empty response (or the request raised): record -1 and bail out.
            self.menu.update_one(filter_, {'$set': {'response_status_code': -1}})
            print('%s::: 获取详情URL：%s 响应状态码异常' % (datetime.now(), url))
            return False
        item["Bid_data_acquisition_format"] = "HTML"
        item["originalurl_data_from"] = {
            "url": url,
            "method": "get",
            "request_only_data": "",
            "response_only_data": ""
        }
        item["file_json"] = ""
        # Locate the body container and hash its Chinese text.
        html_xpath_dict = self.get_html_id_xpath(response, item['title'], item['publishdate'])
        if html_xpath_dict["status"] == 200:
            item['html_id'] = html_xpath_dict['html_id']
            item['html'] = html_xpath_dict['html']
            item['text_xpath'] = html_xpath_dict['text_xpath']
        else:
            return False
        # A duplicate html_id means another stored row hashed to the same body
        # text; flag it instead of silently inserting a duplicate hash.
        find_one_data = self.detail_data_col.find_one({"html_id": item['html_id']}, {"_id": 1})
        if not find_one_data:
            item['xpath_err'] = 0  # normal case: xpath_err stays 0
        else:
            item['xpath_err'] = 10
        title_xpath = item['text_xpath']
        # Flag images and embedded iframes inside the announcement body.
        if choice.xpath(f'{title_xpath}//img').get() is None:
            item["weather_have_image"] = 0
        else:
            item["weather_have_image"] = 1
        if choice.xpath('//iframe').get() is None:
            item["weather_have_iframe"] = 0
        else:
            item["weather_have_iframe"] = 1
        return True

    def get_detail_consumer(self):
        """ 获取标书详情的消费者线程 """
        print('%s:::获取标书详情的消费者线程开启' % datetime.now())
        while True:
            if not self.detail_queue.qsize():
                print('%s:::获取详情的队列为空休息5s' % datetime.now())
                time.sleep(5)
                continue
            one_detail_data = self.detail_queue.get()
            if not one_detail_data:
                print('%s:::获取标书详情的消费者线程结束' % datetime.now())
                break
            self.get_one_detail(one_detail_data)  # 获取一个链接的html

    def screen_shot_product(self, flag=False):
        """Producer: queue detail rows that still need a page screenshot.

        Rows stuck in state 3 (claimed) or 4 (failed) for more than a day
        are reset to 1 so they get retried.

        :param flag: debug switch — stop after queueing a single row.
        """
        print('%s:::获取截图的生产者线程开启' % datetime.now())
        i = 0
        # Retry stale claims/failures older than one day.
        self.detail_data_col.update_many({'is_pic': {'$in': [4, 3]}, 'is_pic_time': {'$lt': datetime.now() - timedelta(days=1)}},
                {'$set':{'is_pic':1}})
        while True:
            try:
                # Atomically claim one row (is_pic 1 -> 3) with a claim
                # timestamp; the third positional argument is the projection.
                one_data = self.detail_data_col.find_one_and_update({'is_pic': 1}, {'$set':{'is_pic': 3, 'is_pic_time': datetime.now()}},
                        {'_id':1, 'originalurl':1})
                if not one_data:
                    print('%s:::截图的生产者线程结束' % datetime.now())
                    break
                self.screen_queue.put(one_data)
                if flag:
                    break
            except Exception as err:
                # Give up after 5 consecutive DB errors.
                if i>=5:
                    print('%s:::截图的生产者异常结束 err info: %s' % (datetime.now(), err))
                    break
                i+=1
                time.sleep(3)

        # One sentinel per consumer thread signals shutdown.
        for i in range(self.thread_num):
            self.screen_queue.put(None)

    def screen_shot_consumer(self):
        """Consumer: take a screenshot for each queued detail record.

        Uploads via pic_use, stores the resulting path as SnapShot and marks
        is_pic=2 on success / 4 on failure (4 is retried by the producer).
        """
        print('%s:::获取截图的消费者线程开启' % datetime.now())
        while True:
            if not self.screen_queue.qsize():
                print('%s:::截图的队列为空休息5s' % datetime.now())
                time.sleep(5)
                continue
            item = self.screen_queue.get()
            if not item:
                print('%s:::获取截图的消费者结束' % datetime.now())
                break
            pic_name = item['_id']
            url = item['originalurl']
            print('%s:::url : %s 开始截图' % (datetime.now(), url))
            path_dict = {}
            name = 'henan_publish'
            save_path, small_path = path_file(name=name, picname=pic_name)
            try:
                # pic_use screenshots the page and records the upload path in
                # path_dict['path'].
                pic_use(url=url, save_path=save_path, small_path=small_path, item=path_dict, red=1)
                item['SnapShot'] = path_dict['path']
                # Local copies are no longer needed once uploaded.
                os.remove(save_path)
                os.remove(small_path)
                self.detail_data_col.update_one({'_id': pic_name}, {'$set': {'SnapShot': path_dict['path'], 'is_pic': 2}})
            except Exception as err:
                # Log instead of silently swallowing the failure.
                print('%s:::截图失败 err info: %s' % (datetime.now(), err))
                # Fixed: path_dict['path'] may be absent when pic_use itself
                # failed; the old code raised KeyError inside this handler and
                # left the row stuck at is_pic=3. Use .get so the is_pic=4
                # (retryable) state is always recorded.
                self.detail_data_col.update_one({'_id': pic_name}, {'$set': {'SnapShot': path_dict.get('path', ''), 'is_pic': 4}})

    def run_thread_list(self):
        self.get_all_type()  # 所有的筛选种类入库
        self.thread_name_list = [
            self.get_page_list_product, # 获取列表的生产者
            self.get_detail_product,  # 获取详情的生产者
            # self.screen_shot_product,  # 获取截图的生产000者
        ]
        self.more_thread_name_list = [
            self.get_page_list_consumer, # 获取列表的消费者
            self.get_detail_consumer,  # 获取详情的消费者
            # self.screen_shot_consumer,  # 获取截图的消费者
        ]

    def run_test(self):
        import pdb
        pdb.set_trace()
        # self.get_all_type()  # 所有的筛选种类入库

        # self.get_page_list_product(True)  # 获取列表的生产者
        # self.get_page_list_consumer() # 获取列表的消费者

        self.get_detail_product(True)  # 获取详情的生产者
        self.get_detail_consumer()  # 获取详情的消费者

        self.screen_shot_product(True)  # 获取截图的生产者
        self.screen_shot_consumer()  # 获取截图的消费者



if __name__ == '__main__':
    spider = SpiderMenu()
    # BaseSpider.run() is expected to drive the producer/consumer thread
    # lists registered by run_thread_list().
    spider.run()
    # spider.run_test()
