# -*- coding: utf-8 -*-
# @Time : 2020/12/2 11:22
# @Author : zhangxing
# @File :
import json
import re

import requests
import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.henan.items import HeNanShengGongGongZiYuanJiaoYiZhongXinMenHuWangZhanItem
from commonresources.spiders.basespider import BaseSpider


class HeNanShengGongGongZiYuanJiaoYiZhongXinMenHuWangZhanSpider(BaseSpider):
    """Spider for the Henan Provincial Public Resource Trading Center portal.

    Home page:    http://www.hnggzy.com/hnsggzy/
    Listing page: http://www.hnggzy.com/hnsggzy/jyxx/002001/002001001/?Paging=1

    Crawls two announcement categories (tender and award notices), walks
    the paginated listing, and yields one detail request per row.
    """
    name = "HeNanShengGongGongZiYuanJiaoYiZhongXinMenHuWangZhan"
    name_zh = "河南省公共资源交易中心门户网"
    province = "河南"
    # NOTE(review): these counters appear unused in this file — presumably
    # read by BaseSpider or a pipeline; kept for compatibility.
    total_count = 0
    headers_cookie = ''
    scrapy_count = 0

    def __init__(self, full_dose=False):
        """Initialize the spider.

        Args:
            full_dose: when True, crawl the full history (back to 2015-12-31);
                when False (default), incremental mode — only today's rows.
        """
        super(HeNanShengGongGongZiYuanJiaoYiZhongXinMenHuWangZhanSpider, self).__init__(full_dose)
        self.convert_dict = convert_dict  # used when persisting items
        # %s placeholders: (announcement_type_num, page number)
        self.url = "http://www.hnggzy.com/hnsggzy/jyxx/002001/%s/?Paging=%s"

    def start_requests(self):
        """Kick off one paginated crawl per announcement category."""
        start_configs = [{
            "announcement_type_num": '002001001',
            "announcement_type": "招标公告"  # tender announcements
        }, {
            "announcement_type_num": '002001003',
            "announcement_type": "中标公告"  # award announcements
        }]
        for config in start_configs:
            announcement_type_num = config['announcement_type_num']
            announcement_type = config['announcement_type']
            yield scrapy.Request(
                url=self.url % (announcement_type_num, 1),
                callback=self.handle_response,
                headers=self.faker_headers(),
                dont_filter=True,
                meta={
                    'announcement_type_num': announcement_type_num,
                    'announcement_type': announcement_type,
                    'page_count': -1,  # sentinel: total pages not yet parsed
                    'page': 1,
                    'need_break': False,
                }
            )

    def handle_response(self, response):
        """Parse one listing page.

        Yields a detail request per announcement row, then schedules the
        next listing page unless a stop condition was hit: in incremental
        mode any row not dated today, or (full mode) rows older than
        2015-12-31, set ``need_break`` and end pagination.
        """
        if response.meta['page_count'] == -1:
            # Parse total page count once, from the "last page" (末页)
            # pager widget.  Fall back to 1 page instead of raising
            # IndexError when the pager is missing (error/empty page).
            matches = re.findall(r'末页.*?>\d*/(\d*)<', response.text)
            response.meta['page_count'] = int(matches[0]) if matches else 1

        rows = response.xpath('//div[@class="bd-content1"]/div/table//tr')
        for row in rows:
            date_text = row.xpath('.//font/text()').extract_first()
            if not date_text:
                # Header/separator rows carry no <font> date cell; skip
                # them instead of crashing on None[1:-1].
                continue
            item = dict()
            item['release_time'] = date_text[1:-1]  # strip surrounding brackets

            if not self.full_dose and item['release_time'] != get_current_date():
                # Incremental mode: a row not from today means we are past
                # the fresh announcements.
                response.meta['need_break'] = True
            elif item['release_time'] < "2015-12-31":
                # ISO-formatted dates compare correctly as strings.
                response.meta['need_break'] = True
            else:
                item['announcement_title'] = row.xpath('.//a/@title').extract_first()
                item['origin_url'] = "http://www.hnggzy.com" + row.xpath('.//a/@href').extract_first()
                item['announcement_type'] = response.meta['announcement_type']
                item['item'] = HeNanShengGongGongZiYuanJiaoYiZhongXinMenHuWangZhanItem()
                yield scrapy.Request(url=item['origin_url'],
                                     callback=self.parse_item_new,
                                     meta=item)

        if not response.meta['need_break']:
            page = response.meta['page']
            page_count = response.meta['page_count']
            if page <= page_count:
                page += 1
                yield scrapy.Request(
                    url=self.url % (response.meta['announcement_type_num'], page),
                    callback=self.handle_response,
                    headers=self.faker_headers(),
                    dont_filter=True,
                    meta={
                        'announcement_type_num': response.meta['announcement_type_num'],
                        'announcement_type': response.meta['announcement_type'],
                        'page_count': page_count,
                        'page': page,
                        'need_break': False,
                    }
                )

    def faker_headers(self):
        """Return browser-like request headers for www.hnggzy.com.

        NOTE(review): the Cookie/Referer values are hard-coded captures and
        may expire — confirm the site still accepts stale session cookies.
        """
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Cookie": "ASP.NET_SessionId=xwu2toubqlqg3wmn45ia4l5g; __CSRFCOOKIE=39aaf9b2-9b35-4642-92ae-bd64b6ef683a",
            "Host": "www.hnggzy.com",
            "Referer": "http://www.hnggzy.com/hnsggzy/jyxx/002001/002001001/?Paging=316",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3861.400 QQBrowser/10.7.4313.400",
        }
        return headers
