import json
import re

import scrapy
from spidertools.common_pipeline.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.jiangsu.suqian.items import SuqianShiGongGongZiYuanJiaoYiDianZiFuWuPingTaiItem


class SuqianShiGongGongZiYuanJiaoYiDianZiFuWuPingTaiSpider(scrapy.Spider):
    """Spider for the Suqian public-resource trading e-service platform.

    宿迁市公共资源交易电子服务平台    http://ggzy.sqzwfw.gov.cn/

    Crawl flow:
        start_requests -> handle_page_response (total page count)
                       -> handle_next_html     (one listing page per request)
                       -> handle_detail_response (one announcement detail)
    """
    name = 'SuqianShiGongGongZiYuanJiaoYiDianZiFuWuPingTai'
    # NOTE(review): trailing space kept intentionally -- this exact string is
    # stored as item['source_type'] downstream; changing it would alter data.
    name_zh = "宿迁市公共资源交易电子服务平台 "
    province = "江苏"
    city = '宿迁'
    allowed_domains = ['ggzy.sqzwfw.gov.cn']

    def __init__(self, full_dose=False):
        """
        :param full_dose: whether to crawl the full history. Default False,
            i.e. incremental mode: only announcements published today.
            Accepts a bool or a string such as "true"/"false", because
            Scrapy passes ``-a`` spider arguments as strings.
        """
        super().__init__()
        self.page_total = -1  # total page count; set by handle_page_response
        self.browser_cookie = {}
        self.convert_dict = convert_dict
        # BUG FIX: ``-a full_dose=False`` arrives as the *string* "False",
        # which is truthy -- normalize string values explicitly so that
        # incremental mode is not silently disabled from the command line.
        if isinstance(full_dose, str):
            self.full_dose = full_dose.strip().lower() in ('true', '1', 'yes')
        else:
            self.full_dose = bool(full_dose)

    def parse(self, response):
        # Unused: every request is routed to an explicit callback.
        pass

    def start_requests(self):
        """Request the record count for category 001 (all cities, no keyword)."""
        url = "http://ggzy.sqzwfw.gov.cn/WebBuilder/jyxxAction.action?cmd=getListByCount"
        data = {
            "categorynum": "001",
            "city": "",
            "xmmc": "",
        }
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            # "Content-Length": "27",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            # "Cookie": "JSESSIONID=9AD5B5C0CDC989EDFBCD36F319DE998D",
            "Host": "ggzy.sqzwfw.gov.cn",
            "Origin": "http://ggzy.sqzwfw.gov.cn",
            "Referer": "http://ggzy.sqzwfw.gov.cn/jyxx/tradeInfo.html",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest",
        }
        yield scrapy.FormRequest(url, formdata=data, headers=headers, callback=self.handle_page_response)

    def fake_data(self, page):
        """Build the form payload for listing page *page* (15 rows per page)."""
        formdata = {
            "categorynum": "001",
            "city": "",
            "xmmc": "",
            "pageIndex": f"{page}",
            "pageSize": "15",
        }
        return formdata

    def handle_page_response(self, response):
        """Read the total page count, then request the first listing page."""
        url = 'http://ggzy.sqzwfw.gov.cn/WebBuilder/jyxxAction.action?cmd=getList'
        # The body is JSON like {"custom": <count>, "status": ...}.  Parse it
        # properly instead of scraping the raw text with a regex, which raised
        # AttributeError whenever the server changed key order or formatting.
        self.page_total = int(json.loads(response.text)['custom'])
        data = self.fake_data(0)
        yield scrapy.FormRequest(url, formdata=data, meta={'page': 0, 'need_break': False},
                                 callback=self.handle_next_html)

    def handle_next_html(self, response):
        """Parse one listing page: yield detail requests, then the next page."""
        obj_json = json.loads(response.text)
        # 'custom' holds a JSON-encoded string whose 'Table' key is the row list.
        obj_json_page = obj_json['custom']
        rows_info_list = json.loads(obj_json_page)['Table']
        need_break = response.meta.get('need_break', False)
        today = get_current_date()  # hoisted: was recomputed once per row
        for row in rows_info_list:
            item = dict()
            item['project_area'] = row['city']
            # strip('][') removes the surrounding brackets of a list-like
            # string, e.g. "[房建]" -> "房建".
            item['construction_type'] = str(row['type']).strip('][')
            item['release_time'] = row['postdate']
            if not self.full_dose and item['release_time'] != today:
                # BUG FIX: incremental mode used to keep yielding detail
                # requests for outdated rows after flagging need_break.
                # Rows are assumed newest-first (TODO confirm), so the first
                # non-today row means every later row is old too -- stop here
                # without fetching its detail page.
                need_break = True
                break
            item['origin_url'] = "http://ggzy.sqzwfw.gov.cn" + row['href']
            item['announcement_title'] = row['title']
            yield scrapy.Request(item['origin_url'], meta=item, callback=self.handle_detail_response, dont_filter=True)
        if not need_break:
            page = response.request.meta['page']
            if page < self.page_total:
                page += 1
                next_page_url = 'http://ggzy.sqzwfw.gov.cn/WebBuilder/jyxxAction.action?cmd=getList'
                yield scrapy.FormRequest(url=next_page_url, formdata=self.fake_data(page),
                                         meta={'page': page, 'need_break': False},
                                         callback=self.handle_next_html)

    def handle_detail_response(self, response):
        """Wrap one announcement detail page into the platform item."""
        item = SuqianShiGongGongZiYuanJiaoYiDianZiFuWuPingTaiItem()
        item['project_area'] = response.meta['project_area']
        item['release_time'] = response.meta['release_time']
        item['construction_type'] = response.meta['construction_type']
        item['origin_url'] = response.meta['origin_url']
        item['announcement_title'] = response.meta['announcement_title']
        item['html'] = response.text  # raw HTML; parsed later (is_parsed=0)
        item['source_type'] = self.name_zh
        item['is_parsed'] = 0
        item['province'] = self.province
        item['city'] = self.city
        yield item
