import re

import scrapy
import json

from spidertools.common_pipeline.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.jiangsu.suzhou.items import SuZhouGongGongZiYuanJiaoYiPingTaiItem


class SuZhouGongGongZiYuanJiaoYiPingTaiSpider(scrapy.Spider):
    """
    List-page spider for the Suzhou Public Resources Trading Platform
    (苏州公共资源交易平台), http://www.szzyjy.com.cn/.

    Walks the paginated JSON listing endpoint, follows every announcement
    to its detail page, and yields one item per announcement.
    """
    name = 'SuZhouGongGongZiYuanJiaoYiPingTai'
    name_zh = '苏州公共资源交易平台'
    province = "江苏"
    city = '苏州'
    allowed_domains = ['szzyjy.com.cn']

    # Listing endpoint pages are fixed at 15 rows each.
    PAGE_SIZE = 15
    # Paginated JSON list endpoint; {page} is the zero-based page index.
    # (Previously duplicated verbatim in two methods.)
    LIST_URL_TEMPLATE = (
        "http://www.szzyjy.com.cn/EpointWebBuilder/JyxxSearchAction.action"
        "?cmd=getList1&categorynum=003&diqu=%E8%8B%8F%E5%B7%9E%E5%B8%82"
        "&xmmc=&zstype=&zblx=&starttime=&endtime="
        "&siteguid=7eb5f7f1-9041-43ad-8e13-8fcb82ea831a"
        "&pageIndex={page}&pageSize=15"
    )

    def __init__(self, full_dose=False, *args, **kwargs):
        """
        :param full_dose: crawl the full history when true, otherwise only
            today's announcements. Accepts a bool, or the string form that
            ``scrapy crawl -a full_dose=...`` passes (``"false"``/``"0"``/
            ``"no"`` are treated as False).
        """
        super().__init__(*args, **kwargs)
        self.browser_cookie = {}
        # Filled in by parse_start_requests once the total count is known.
        self.page_count = -1
        self.convert_dict = convert_dict
        # Scrapy's -a mechanism delivers strings, and any non-empty string
        # (including "false") is truthy — coerce explicitly.
        if isinstance(full_dose, str):
            full_dose = full_dose.strip().lower() in ('true', '1', 'yes')
        self.full_dose = bool(full_dose)

    def get_page_headers(self):
        """Browser-like headers for the listing endpoint (anti-bot friendly)."""
        return {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Host": "www.szzyjy.com.cn",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.37"[:-1] + "6"
        }

    def start_requests(self):
        """Fetch the HTML landing page first to read the total record count."""
        url = 'http://www.szzyjy.com.cn/jyxx/tradeInfo.html'
        yield scrapy.Request(url, callback=self.parse_start_requests)

    def parse_start_requests(self, response):
        """Derive the page count from the landing page, then request page 0."""
        match = re.search(r'total: (.*?),', response.text)
        if match is None:
            # Layout changed or the request was blocked; nothing to paginate.
            self.logger.error('total record count not found on %s', response.url)
            return
        self.page_count = int(match.group(1)) // self.PAGE_SIZE + 1
        yield scrapy.Request(
            url=self.LIST_URL_TEMPLATE.format(page=0),
            headers=self.get_page_headers(),
            meta={'page': 0, 'need_break': False},
            callback=self.handle_response,
        )

    def handle_response(self, response):
        """Parse one JSON list page: yield detail requests, then paginate."""
        # The endpoint double-encodes: the real payload is a JSON string
        # stored under the 'custom' key of the outer JSON document.
        payload = json.loads(json.loads(response.text)['custom'])
        for row in payload['Table']:
            # index == "-14" marks non-announcement filler rows; skip them.
            if row['index'] == "-14":
                continue
            item = dict()
            item['release_time'] = row['postdate']
            if not self.full_dose and item['release_time'] != get_current_date():
                # Incremental mode: rows are newest-first, so the first
                # non-today row means we can stop paginating.
                response.meta['need_break'] = True
            else:
                item['construction_type'] = row['jyfl']
                item['project_area'] = row['city']
                item['source_type'] = self.name
                item['project_categorynum'] = row['categorynum']
                # Detail URL layout:
                #   /jyxx/<category prefix>/<full category>/<yyyymmdd>/<infoid>.html
                date_part = ''.join(row['postdate'].split('-'))
                item['origin_url'] = (
                    f"http://www.szzyjy.com.cn/jyxx/{row['categorynum'][0:6]}/"
                    f"{row['categorynum']}/{date_part}/{row['infoid']}.html"
                )
                item['announcement_title'] = row['title']
                yield scrapy.Request(url=item['origin_url'], meta=item,
                                     callback=self.handle_next_html,
                                     cookies=self.browser_cookie)

        if not response.meta['need_break']:
            page = response.meta['page']
            if page < self.page_count:
                page += 1
                yield scrapy.Request(
                    url=self.LIST_URL_TEMPLATE.format(page=page),
                    callback=self.handle_response,
                    headers=self.get_page_headers(),
                    cookies=self.browser_cookie,
                    meta={'page': page, 'need_break': False},
                    errback=self.handle_error,
                    dont_filter=True,
                )

    def handle_next_html(self, response):
        """Wrap a detail page into an item, carrying metadata from the list row."""
        item = SuZhouGongGongZiYuanJiaoYiPingTaiItem()
        for key in ('project_area', 'release_time', 'origin_url', 'announcement_title'):
            item[key] = response.meta[key]
        item['html'] = response.text
        item['source_type'] = self.name_zh
        item['is_parsed'] = 0  # flag for the downstream parsing pipeline
        item['province'] = self.province
        item['city'] = self.city
        yield item

    def handle_error(self, failure):
        """Log failed pagination requests instead of silently dropping them."""
        self.logger.warning('request failed: %r', failure)