#!/usr/bin/python3.8
# -*- coding:UTF-8 -*-
"""
@author:    zhaoxi
@date:      2022/8/11
@software:  PyCharm
@file:      henan_qg_gonggongziyuan.py
@project:   tender_project
@time:      17:16
@user:      Administrator
"""
# -*- coding: utf-8 -*-
# @ 河南省公共资源交易中心(全国版！)
# @ 河南省公共资源交易中心
# @ http://hnsggzyfwpt.hndrc.gov.cn/
# @ time  2021-11-29    更新
# @ author: LZL
# @ 网站
import datetime
import json
import math
import os
import re
import sys
import time
import urllib.parse
import pymongo.errors
import requests
from pymongo import ReturnDocument
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, BASE_DIR)
from datetime import datetime
from parsel import Selector
from queue import Queue
from base_spider import BaseSpider
from conf.dber import MongoDBer
from conf.conn_pool import get_connection
from conf.database import DATABASE
from conf.Pic_Use import get_host_ip
from loguru import logger
from conf import conf_util
from lxml import etree

class HeNanQG(BaseSpider):
    """Spider for the Henan Public Resources Trading Center (national platform,
    http://hnsggzyfwpt.hndrc.gov.cn/).

    Pipeline: producer threads enqueue category codes and pending listing rows
    pulled from MongoDB; consumer threads crawl the JSON listing API and the
    HTML detail pages, upserting results back into MongoDB collections.
    Row state is tracked with small integer flags (`is_html`, `cz`, `is_pic`)
    claimed atomically via `find_one_and_update`.
    """

    def __init__(self,thread_num=4):
        # thread_num: number of consumer threads; each producer pushes this
        # many None sentinels to shut its consumers down.
        super().__init__(thread_num)
        # self.conn = get_connection(DATABASE[db_name])  # MySQL connection pool
        self.db_m = MongoDBer(DATABASE['liuzilong'])  # MongoDB database connection object
        # self.db_m = MongoDBer(DATABASE['zhaoxi'])  # MongoDB database connection object
        self.qg_hns_menu_j = self.db_m.lzl_qg_hns_menu_j  # listing (menu) collection
        self.qg_hns_data = self.db_m.lzl_zx_qg_hns_l  # announcement detail collection
        self.page_jl = self.db_m['lzl_page_jl']  # per-category crawl-progress records
        self.detail_queue = Queue(maxsize=40) # listing rows awaiting a detail-page fetch
        self.menu_queue = Queue(maxsize=20) # category codes awaiting a listing crawl
        self.screen_queue = Queue(maxsize=40)  # detail rows awaiting a screenshot
        # categorynum -> [industry, announcement type, is_pic flag, progress stage]
        self.menu_dict = {'002001001': ['工程建设', '招标公告', 0, '招标公告'],
                   '002001002': ['工程建设', '变更公告', 0, '招标公告'],
                   '002001003': ['工程建设', '评标结果公示', 1, '中标公告'],
                   '002001004': ['工程建设', '中标结果公示', 1, '中标公告'],
                   '002002001': ['政府采购', '采购公告', 0, '招标公告'],
                   '002002002': ['政府采购', '变更公告', 0, '招标公告'],
                   '002002003': ['政府采购', '结果公示', 1, '中标公告'],
                   '002003001': ['土地矿产', '交易公告', 0, '招标公告'],
                   '002003002': ['土地矿产', '变更公告', 0, '招标公告'],
                   '002003003': ['土地矿产', '成交公示', 1, '中标公告'],
                   '002004001': ['产权交易', '交易公告', 0, '招标公告'],
                   '002004002': ['产权交易', '变更公告', 0, '招标公告'],
                   '002004003': ['产权交易', '成交公示', 1, '中标公告'],
                   '002006001': ['其他交易', '交易公告', 0, '招标公告'],
                   '002006002': ['其他交易', '成交公示', 1, '中标公告']}
        self.sourceplatform = '全国公共资源交易平台(河南省)'
        self.province = '河南省'
        self.url = 'http://hnsggzyfwpt.hndrc.gov.cn/services/hl/getSelect'
        self.page_url = 'http://hnsggzyfwpt.hndrc.gov.cn/services/hl/getCount'
        self.original_website_id = 12
        # Base query string for the listing API; pageIndex/categorynum are
        # overwritten per request.
        self.params = {
            'response': 'application/json',
            'pageIndex': 1,
            'pageSize': 22,
            'day': '',
            'sheng': 'x1',
            'qu':'',
            'xian':'',
            'title':'',
            'timestart':'',
            'timeend':'',
            'categorynum': '002001004',
            'siteguid': '9f5b36de-4e8f-4fd6-b3a1-a8e08b38ea38',
        }

    def get_detail_producer(self):
        '''
        Producer: atomically claim unprocessed listing rows from MongoDB and
        feed them to the detail queue for the consumer threads.
        '''
        print('获取标书详情页生产者线程开始')
        # Reset rows claimed (is_html=1) but never completed (cz != 1) by a
        # previous run so they get picked up again.
        filter = {'is_html': 1, 'cz': {'$ne': 1}}
        update = {'$set':{'is_html':0}}
        self.qg_hns_menu_j.update_many(filter,update)
        filter_ = {'is_html': {'$ne': 1}, 'cz': {'$ne': 1}}
        update_ = {'$set': {'is_html': 1}}  # $inc      $set
        proj = {}
        while True:
            try:
                # Atomic claim: mark the row is_html=1 and receive it in one step.
                data = self.qg_hns_menu_j.find_one_and_update(filter_, update_, proj,return_document=ReturnDocument.AFTER)
                if data is None:
                    print("数据获取任务结束！！！",    datetime.now())
                    break
                # Listing rows store the title under the misspelled key
                # 'tile' (see parse_one_page_data); normalize before queueing.
                data['title'] = data["tile"]
                data.pop('tile')
                self.detail_queue.put(data)
            except Exception as  err:
                print('目录生产者报错 报错是%s' % err)
                time.sleep(50)
                continue
            # self.start(item=data)
        # One None sentinel per consumer thread signals shutdown.
        for i in range(self.thread_num):
            self.detail_queue.put(None)
        print('获取标书详情页生产者线程结束')

    def get_html_id_xpath(self, response_obj):
        '''
        Extract the announcement body text (used downstream as a dedupe key)
        and the xpath of the content container.

        Returns {'status': 200, 'html_id', 'text_xpath', 'html'} on success,
        or {'status': 404, ...} with empty fields when the container yields
        no text.
        '''
        html_id = conf_util.get_all_text(res=response_obj,xpath='//div[@class="ewb-left-bd"]//text()')
        if html_id:
            return {'status': 200, 'html_id': html_id, "text_xpath": '//div[@class="ewb-left-bd"]',"html": response_obj}
        return {'status': 404, 'html_id': '', 'text_xpath': ''}

    def get_one_page(self, item):
        '''
        Fetch one announcement detail page and hand the response to
        save_one_page_data.
        '''
        url = item['originalurl']
        item.pop('_id')  # drop Mongo _id so the item can be upserted into the detail collection
        item['xpath_err'] = 0
        try:
            response = self.send_rquest_get(url)
            self.save_one_page_data(response=response,item=item)
        except Exception as err:
            # NOTE(review): a failed fetch is marked cz=1 (processed), so the
            # row will never be retried — confirm this is intended.
            self.qg_hns_menu_j.update_one({'originalurl':url}, {'$set':{'cz':1}})
            logger.error(f'获取标书详情页请求err  info::{err}')

    def save_one_page_data(self,response,item):
        '''
        Parse a detail-page response, enrich the item and upsert it into the
        detail collection, then mark the listing row as processed (cz=1).
        '''
        fipd = {'originalurl':item['originalurl']}
        etree_obj = etree.HTML(response)
        etree_obj = conf_util.remove_js_css(etree_obj)
        url = item['originalurl']
        # NOTE(review): raises IndexError when the page lacks this title
        # node; the caller's except handler then marks the row cz=1.
        TwoLvTitle = etree_obj.xpath('//*[@class="ewb-left l"]/*[@class="ewb-left-tt"]/text()')[0]
        item["Bid_data_acquisition_format"] = "HTML"
        item["originalurl_data_from"] = {
            "url": url,
            "method": "get",
            "request_only_data": "",
            "response_only_data": ""
        }
        item["file_json"] = ""
        datas = self.get_html_id_xpath(response)
        if datas["status"] == 200:
            item['html_id'] = datas['html_id']
            item['html'] = datas['html']
            # print(item['html_id'])
            # xpath_err: 0 = ok, 10 = duplicate body text already stored,
            # 1 = body extraction failed.
            if self.qg_hns_data.find_one({"html_id": item['html_id']}, {"_id": 1}) is None:
                item['xpath_err'] = 0
            else:
                item['xpath_err'] = 10
        else:
            item['xpath_err'] = 1
            item["html"] = response
        item['TwoLvTitle'] = TwoLvTitle
        item['is_parse_html'] = 0
        item['is_html'] = 0
        item['province'] = '河南省'
        item['tenderaddress'] = f'河南省-{item["city"]}'
        item['text_xpath'] = datas['text_xpath']
        item['utime'] = datetime.now()
        # Flags below are initialized to 0 here — presumably set by a
        # downstream parsing stage; confirm against is_parse_html consumers.
        item["weather_have_iframe"] = 0  #
        item["weather_have_image"] = 0  #
        item["weather_have_pdf"] = 0  #
        item["weather_have_pdf_type2"] = 0  #
        item['weather_have_enclosure'] = 0  # whether the page has attachments
        item["url_type"] = 'html'  #
        item["original_website_id"] = self.original_website_id  #
        item["weather_have_blank_url"] = 0  #
        self.qg_hns_data.update_one(fipd,{'$set':item},upsert=True)
        print(f'{item["originalurl"]}\n保存并更新数据完成：{item["title"]} ！！！！')
        self.qg_hns_menu_j.update_one(fipd, {'$set': {'cz': 1}})

    def get_detail_consumer(self):
        '''
        Consumer: take listing rows off the detail queue and fetch their
        detail pages; a None item is the shutdown sentinel.
        '''
        print('获取标书详情页消费者线程开始')
        while True:
            # NOTE(review): Queue.get() already blocks; this empty()+sleep
            # poll only adds latency and is not needed for correctness.
            if self.detail_queue.empty():
                time.sleep(10)
                continue
            item = self.detail_queue.get()
            if item:
                self.get_one_page(item=item)
            else:
                break
        print('获取标书详情页消费者线程结束')

    def get_max_page(self,data):
        '''
        Ask the getCount endpoint for the total row count and return the
        page count at 22 rows per page; exits the process on failure.
        '''
        try:
            url = self.page_url + '?' + urllib.parse.urlencode(data)
            html_str = self.send_rquest_get(url)
            # NOTE(review): '\d+' is a non-raw escape — works today but emits
            # a DeprecationWarning on newer Pythons; prefer r'\d+'.
            max_data_count = int(re.search('\d+',html_str).group())
            max_page = math.ceil(max_data_count/22)  # 22 = pageSize used in self.params
            return max_page
        except Exception as err:
            logger.error(f'获取最大页码err info::{err}')
            sys.exit(-1)

    def zengliang_spider(self, filte_, item):
        '''
        Load (or initialize) the incremental-crawl progress record for one
        category ("zengliang" = incremental).

        Returns a dict with: page (page to resume from), old_time (reference
        timestamp of the previous pass), number (completed-pass counter) and
        zengliang_flag (True once at least one full pass has finished, i.e.
        subsequent runs are incremental and restart from page 1).
        '''
        Time_Int = int(time.time())
        zengliang_flag = False
        pageSize = 22
        try:
            menu_page = self.page_jl.find_one(filte_, {'page': 1, 'old_time': 1, 'number': 1})
            if menu_page is None:
                menu_page = {}
            page = menu_page.get('page',1)
            Old_time = menu_page.get('old_time',Time_Int)
            number = menu_page.get('number',0)
            # A previous pass completed: switch to incremental mode and
            # restart from page 1 with a fresh reference timestamp.
            if number > 0:
                Old_time = Time_Int
                zengliang_flag = True
                page = 1
            upsert_data = {
                'sourceplatform': self.sourceplatform,
                'industry': self.menu_dict[item][0],
                'pageSize': pageSize,
                'categorynum': item,
                'link': 'http://hnsggzyfwpt.hndrc.gov.cn/',
                'old_time': Old_time,
                'page': page
            }
            self.page_jl.update_one(filte_,{'$set':upsert_data},upsert=True)
            return {'page':page,'old_time':Old_time,'number':number,'zengliang_flag':zengliang_flag}
        except Exception as err:
            logger.error(f'增量err info::{err}')

    def get_one_type_menu(self, item):
        '''
        Crawl the listing pages of one category code, stopping at the last
        page or (in incremental mode) once items get older than the previous
        pass minus one day.
        '''
        industry = item
        filte_ = {
            'sourceplatform': self.sourceplatform,
            'industry': self.menu_dict[item][0],
            'pageSize': 22, 'categorynum': item,
            'link': 'http://hnsggzyfwpt.hndrc.gov.cn/'
        }

        zengliang = self.zengliang_spider(filte_=filte_, item=item)
        page = zengliang['page']
        old_time = zengliang['old_time']
        finish_times = zengliang['number']
        zengliang_flag = zengliang['zengliang_flag']
        data = {}
        data.update(self.params)
        data['categorynum'] = industry
        end_page = self.get_max_page(data)
        current_time = int(time.time())

        while True:
            print(1111111111111111)  # NOTE(review): leftover debug print
            data['pageIndex'] = page
            url = self.url + '?' + urllib.parse.urlencode(data)
            try:
                # NOTE(review): '>=' returns before fetching page == end_page;
                # confirm the last page is intentionally skipped.
                if page >= end_page:
                    self.page_jl.update_one(filte_, {'$set': {'number': finish_times + 1, 'old_time': current_time}})
                    return
                html_str = self.send_rquest_get(url=url)
                # The API returns a JSON string whose 'return' field is itself
                # a JSON-encoded string.
                data_dict = json.loads(html_str)['return']
                if data_dict:
                    data_dict = json.loads(data_dict)
                    print(data_dict)
                    count = data_dict['RowCount']
                    datas = data_dict['Table']
                    timeStamp = self.parse_one_page_data(li_obj=datas, industry=industry)
                    if zengliang_flag:
                        print('判断时间！！！！')
                        if timeStamp:
                            # Incremental stop: last row on the page is more
                            # than a day older than the previous pass.
                            if timeStamp <= int(old_time - 86400):
                                self.page_jl.update_one(filte_,
                                                        {'$set': {'number': finish_times + 1, 'old_time': current_time}})
                                return
                    logger.success(f'目录{self.menu_dict[industry][0]}-{self.menu_dict[industry][1]}第{page}页共获取数据{count}条！！！')
                    time.sleep(2)
                    self.page_jl.update_one(filte_, {'$set': {'page': page}})
                    page += 1
                else:
                    self.page_jl.update_one(filte_, {'$set': {'number': finish_times + 1, 'old_time': current_time}})
                    break
            except Exception as err:
                # NOTE(review): the error is swallowed and page is NOT
                # advanced, so a persistently failing URL loops forever.
                print('请求失败，url是%s' % url)


    def parse_one_page_data(self,li_obj,industry):
        '''
        Parse one listing page and insert rows not already present
        (dedupe on originalurl).

        Returns the publish timestamp (epoch seconds) of the last row
        processed, or '' when li_obj is empty.
        '''
        timeStamp = ''
        for data in li_obj:
            item = {}
            lei = self.menu_dict[industry]
            link = 'http://hnsggzyfwpt.hndrc.gov.cn' + data['href']
            publishdata = data['infodate']
            tss1 = publishdata
            try:
                timeArray = time.strptime(tss1, "%Y-%m-%d %H:%M:%S")
            except:  # fall back to date-only format
                timeArray = time.strptime(data["infodate"], '%Y-%m-%d')
            timeStamp = int(time.mktime(timeArray))
            pd = self.qg_hns_menu_j.find_one({'originalurl': link})
            if not pd:
                item['originalurl'] = link
                item['publishdate'] = data['infodate']
                item['city'] = data['infoc']
                # NOTE(review): 'tile' is a misspelling of 'title' kept for
                # compatibility — get_detail_producer renames it on read.
                item['tile'] = data['title']
                item['category'] = data['category']
                item['industry'] = lei[0]
                item['ifbprogresstag'] = lei[1]
                item['is_pic'] = lei[2]
                item['ifbprogress'] = lei[3]
                item['province'] = self.province
                item['sourceplatform'] = self.sourceplatform
                item['is_html'] = 0
                item['cz'] = 0
                item['IP'] = get_host_ip()
                item['ctime'] = datetime.now()
                item['utime'] = datetime.now()
                self.qg_hns_menu_j.insert_one(item)
                # buffer_list.append(pymongo.UpdateOne({'originalurl':link},{'$set':item},upsert=True))
            else:
                logger.debug('此数据已存在')
        # if buffer_list:
        #     self.qg_hns_menu_j.bulk_write(buffer_list)
        return timeStamp

    def get_menu_page_producer(self):
        '''
        Producer: enqueue every category code, then one None sentinel per
        consumer thread.
        '''
        print('获取目录生产者线程开始')
        for item in self.menu_dict.keys():
            self.menu_queue.put(item)
        for i in range(self.thread_num):
            self.menu_queue.put(None)
        print('获取目录生产者线程结束')

    def get_menu_page_consumer(self):
        '''
        Consumer: take category codes off the menu queue and crawl their
        listings; a None item is the shutdown sentinel.
        '''
        print('获取目录消费者线程开始')
        while True:
            # NOTE(review): Queue.get() already blocks; this empty()+sleep
            # poll only adds latency and is not needed for correctness.
            if self.menu_queue.empty():
                time.sleep(10)
                continue
            item = self.menu_queue.get()
            if item:

                self.get_one_type_menu(item)
            else:
                break
        print('获取目录消费者线程结束')

    def screen_shot_product(self, flag=False):
        """ Screenshot producer thread: claim detail rows flagged is_pic=1
        (marking them 3 = in progress) and queue them.
        flag=True queues a single row and stops (debug aid). """
        print('%s:::获取截图的生产者线程开启' % datetime.now())
        i = 0
        # Reset rows stuck in the in-progress state (3) from a prior run.
        self.qg_hns_data.update_many({'is_pic': 3,},{'$set':{'is_pic':1}})
        while True:
            try:
                one_data = self.qg_hns_data.find_one_and_update({'is_pic': 1}, {'$set':{'is_pic': 3}},{'_id':1, 'originalurl':1})
                if not one_data:
                    print('%s:::截图的生产者线程结束' % datetime.now())
                    break
                self.screen_queue.put(one_data)
                if flag:
                    break
            except Exception as err:
                # Give up after 5 consecutive failures.
                if i>=5:
                    print('%s:::截图的生产者异常结束 err info: %s' % (datetime.now(), err))
                    break
                i+=1
                time.sleep(3)
        for i in range(self.thread_num):
            self.screen_queue.put(None)

    def screen_shot_consumer(self):
        """ Screenshot consumer thread: capture/upload the page image for
        each queued row; on failure reset is_pic=1 for retry, on success
        set is_pic=2. A None item is the shutdown sentinel. """
        print('%s:::获取截图的消费者线程开启' % datetime.now())
        while True:
            if not self.screen_queue.qsize():
                print('%s:::截图的队列为空休息5s' % datetime.now())
                time.sleep(5)
                continue
            item = self.screen_queue.get()
            if not item:
                print('%s:::获取截图的消费者结束' % datetime.now())
                break
            pic_name = item['_id']
            url = item['originalurl']
            print('%s:::url : %s 开始截图' % (datetime.now(), url))
            page_source = conf_util.upload_image_delete_pic2(url=url,coll_name=self.qg_hns_data,id=pic_name)
            if not page_source:
                self.qg_hns_data.update_one({'_id':pic_name},{'$set':{'is_pic':1}})
            else:
                self.qg_hns_data.update_one({'_id':pic_name},{'$set':{'is_pic':2}})

    def run_thread_list(self):
        '''
        Register producer/consumer callables for the BaseSpider runner.

        NOTE(review): get_menu_page_producer is invoked synchronously here
        AND registered in thread_name_list — confirm the base class does not
        run it a second time.
        '''
        self.get_menu_page_producer()
        self.thread_name_list = [
            self.get_menu_page_producer,
            self.get_detail_producer,
            # self.screen_shot_product,
        ]
        self.more_thread_name_list = [
            self.get_menu_page_consumer,
            self.get_detail_consumer,
            # self.screen_shot_consumer,
        ]

    def run_test(self):
        '''Smoke test: print the page count for the default query params.'''
        max_page = self.get_max_page(self.params)
        print(max_page)

if __name__ == '__main__':
    # Entry point: start the full crawl (run() is presumably provided by
    # BaseSpider and drives run_thread_list — confirm in base_spider).
    spider = HeNanQG()
    # spider.run_test()
    spider.run()