import json
import re

import requests
import scrapy
from spidertools.common_pipeline.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date

# from commonresources.spider_items.jiangsu.changzhou.items import ChangZhouGongGongZiYuanJiaoYiWangItem


class ChangZhouGongGongZiYuanJiaoYiWangSpider(scrapy.Spider):
    """
        Changzhou Public Resource Trading Network spider.
        常州公共资源交易网    http://cz.jsggzy.jszwfw.gov.cn/
    """
    name = 'ChangZhouGongGongZiYuanJiaoYiWang'
    # NOTE(review): trailing space preserved — stored records may depend on it.
    name_zh = "常州公共资源交易网 "
    province = "江苏"
    city = '常州'
    allowed_domains = ['cz.jsggzy.jszwfw.gov.cn']

    # Hoisted constants so endpoints / paging values are defined in one place.
    PAGE_SIZE = 15  # rows per list page; must match "pageSize" in fake_list_data
    SITE_GUID = "7eb5f7f1-9041-43ad-8e13-8fcb82ea831a"
    PAGE_COUNT_URL = 'http://cz.jsggzy.jszwfw.gov.cn/czggzyweb/jyxxAction.action?cmd=initPageCount'
    PAGE_LIST_URL = 'http://cz.jsggzy.jszwfw.gov.cn/czggzyweb/jyxxAction.action?cmd=initPageList'

    def __init__(self, full_dose=False):
        """
            :param full_dose: whether to crawl the full history (True) or only
                entries published today (False, the default).  Values supplied
                on the command line via ``-a full_dose=...`` arrive as strings,
                so common string spellings of true are accepted as well.
        """
        super().__init__()
        self.browser_cookie = {}
        self.convert_dict = convert_dict
        # Bug fix: scrapy `-a` arguments are strings, and bool("False") is
        # True — coerce both bool and string inputs to a real bool.
        self.full_dose = str(full_dose).lower() in ('true', '1', 'yes')

    def classify(self):
        """Return the site's category tree.

        Each key is a trade-section code; its value is a list whose first
        element carries the human-readable construction type and whose
        remaining elements are the (categorynum, categoryname) pairs crawled
        for that section.
        """
        return {
            "jsgc": [
                {"construction_type": '建设工程'},
                {
                    "categorynum": "001001001",
                    "categoryname": "招标公告/资审公告"
                },
                {
                    "categorynum": "001001005",
                    "categoryname": "中标候选人公示"
                },
                {
                    "categorynum": "001001006",
                    "categoryname": "中标结果公告"
                }
            ],
            "jtgc": [
                {"construction_type": '交通工程'},
                {
                    "categorynum": "001002001",
                    "categoryname": "招标公告"
                },
                {
                    "categorynum": "001002003",
                    "categoryname": "中标公示"
                },
                {
                    "categorynum": "001002004",
                    "categoryname": "中标结果"
                }
            ],
            "slgc": [
                {"construction_type": '水利工程'},
                {
                    "categorynum": "001003001",
                    "categoryname": "招标公告"
                },
                {
                    "categorynum": "001003003",
                    "categoryname": "中标公示"
                },
                {
                    "categorynum": "001003004",
                    "categoryname": "中标结果公告"
                }
            ],
            "zfcg": [
                {"construction_type": '政府采购'},
                {
                    "categorynum": "001004002",
                    "categoryname": "采购公告"
                },
                {
                    "categorynum": "001004003",
                    "categoryname": "更正公告"
                },
                {
                    "categorynum": "001004006",
                    "categoryname": "成交公告"
                }
            ],
            "tdky": [
                {"construction_type": '土地矿产'},
                {
                    "categorynum": "001005001",
                    "categoryname": "交易公告"
                },
                {
                    "categorynum": "001005003",
                    "categoryname": "结果公示"
                },
                {
                    "categorynum": "001005004",
                    "categoryname": "历年成交信息"
                }
            ],
            "gycq": [
                {"construction_type": '国有产权'},
                {
                    "categorynum": "001006001",
                    "categoryname": "交易公告"
                },
                {
                    "categorynum": "001006003",
                    "categoryname": "成交公告"
                }
            ],
            "qtjy": [
                {"construction_type": '其他交易'},
                {
                    "categorynum": "001009001",
                    "categoryname": "交易公告"
                },
                {
                    "categorynum": "001009002",
                    "categoryname": "成交公示"
                }
            ]
        }

    def parse(self, response):
        # Unused default callback; every request sets an explicit callback.
        pass

    def get_page_count(self, categorynum):
        """Return the number of list pages for *categorynum* (0 when empty).

        Uses a blocking ``requests`` call because the count is needed up front
        in ``start_requests``, before scrapy's own download cycle runs.
        """
        response = requests.post(self.PAGE_COUNT_URL,
                                 data=self.fake_page_data(categorynum),
                                 timeout=30)  # bug fix: no timeout could hang the spider
        options_count = int(response.json().get('custom', 0))
        # Bug fix: ceiling division. The previous `count // 15 + 1` produced
        # one extra (empty) page whenever the total was a multiple of 15.
        return -(-options_count // self.PAGE_SIZE)

    def start_requests(self):
        """Issue one page-0 list request per (section, category) pair."""
        classify = self.classify()
        for section, entries in classify.items():
            construction_type = entries[0].get('construction_type')
            for entry in entries[1:]:
                categorynum = entry['categorynum']
                item_categoryname = entry['categoryname']
                page_count = self.get_page_count(categorynum)
                if not page_count:
                    continue
                yield scrapy.FormRequest(url=self.PAGE_LIST_URL,
                                         formdata=self.fake_list_data(0, categorynum),
                                         dont_filter=True,
                                         callback=self.handle_response,
                                         meta={
                                             'construction_type': construction_type,
                                             'categorynum': categorynum,
                                             'item_categoryname': item_categoryname,
                                             'page_count': page_count,
                                             # Bug fix: 'page' was never put in
                                             # meta, so pagination raised KeyError.
                                             'page': 0,
                                             'need_break': False,
                                         })

    def fake_page_data(self, categorynum):
        """Form payload for the ``initPageCount`` endpoint."""
        return {
            "siteGuid": self.SITE_GUID,
            "cityCode": "",
            "type": categorynum[0:6],
            "categorynum": categorynum,
            "title": "",
            "chanquanleibie": "",
        }

    def fake_list_data(self, page, categorynum):
        """Form payload for the ``initPageList`` endpoint (*page* is 0-based)."""
        return {
            "pageIndex": str(page),
            "pageSize": str(self.PAGE_SIZE),
            "siteGuid": self.SITE_GUID,
            "cityCode": "",
            "type": categorynum[0:6],
            "categorynum": categorynum,
            "title": "",
            "chanquanleibie": "",
        }

    def handle_response(self, response):
        """Parse one list page: yield detail requests, then the next page."""
        meta = response.meta
        rows = json.loads(response.json()['custom'])['Table']
        for row in rows:
            item = dict()
            item['release_time'] = row['infodate']
            if not self.full_dose and item['release_time'] != get_current_date():
                # Incremental mode: a non-today row means we have moved past
                # the fresh entries, so stop paginating this category.
                meta['need_break'] = True
            else:
                item['announcement_title'] = row['title']
                if '【交易公告】' not in item['announcement_title']:
                    # A "交易公告" marks a still-open trade; the notice is
                    # re-published (and re-crawled) once the trade completes.
                    item['project_area'] = row['xiaqu']
                    item['origin_url'] = ('http://cz.jsggzy.jszwfw.gov.cn/jyzx/'
                                          + meta['categorynum'][0:6] + '/'
                                          + meta['categorynum'] + '/'
                                          + ''.join(item['release_time'].split('-')) + '/'
                                          + row['infoid'] + '.html')
                    item['construction_type'] = meta['construction_type']
                    item['info_type'] = meta['item_categoryname']
                    yield scrapy.Request(url=item['origin_url'],
                                         callback=self.handle_detail_response,
                                         meta=item,
                                         )
        # Pages are 0-based, so the last valid index is page_count - 1.
        # (Bug fix: the old `page < page_count` bound requested one page past
        # the end, and 'page' itself was missing from meta entirely.)
        if not meta['need_break'] and meta['page'] + 1 < meta['page_count']:
            yield scrapy.FormRequest(url=self.PAGE_LIST_URL,
                                     formdata=self.fake_list_data(meta['page'] + 1, meta['categorynum']),
                                     dont_filter=True,
                                     callback=self.handle_response,
                                     meta={
                                         'construction_type': meta['construction_type'],
                                         'categorynum': meta['categorynum'],
                                         'item_categoryname': meta['item_categoryname'],
                                         'page_count': meta['page_count'],
                                         'page': meta['page'] + 1,
                                         'need_break': False,
                                     })

    def handle_detail_response(self, response):
        """Assemble the final item from the detail page + list-page metadata."""
        # Bug fix: the project Item class import is commented out at the top
        # of the file, so `ChangZhouGongGongZiYuanJiaoYiWangItem()` raised
        # NameError at runtime.  A plain dict is a valid scrapy item and
        # matches the dict style already used in handle_response.
        item = dict()
        item['project_area'] = response.meta['project_area']
        item['release_time'] = response.meta['release_time']
        item['construction_type'] = response.meta['construction_type']
        item['origin_url'] = response.meta['origin_url']
        item['announcement_title'] = response.meta['announcement_title']
        item['html'] = response.text
        item['source_type'] = self.name_zh
        item['is_parsed'] = 0
        item['province'] = self.province
        item['city'] = self.city
        yield item
