import json
import re

import scrapy
from spidertools.common_pipeline.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.jiangsu.yancheng.items import YanChengShiGongGongZiYuanJiaoYiWangItem


class YanChengShiGongGongZiYuanJiaoYiWangSpider(scrapy.Spider):
    """
        Yancheng Public Resource Trading Network (盐城市公共资源交易网)
        http://www.ycsggzy.com/

        Crawls the announcement list API (double-encoded JSON), follows each
        row to its detail page, and yields one item per announcement.
    """
    name = 'YanChengShiGongGongZiYuanJiaoYiWang'
    name_zh = '盐城市公共资源交易网'
    province = "江苏"
    city = '盐城'
    allowed_domains = ['www.ycsggzy.com']

    # Rows per page served by the list endpoint (pageIndex is 0-based).
    PAGE_SIZE = 20
    # Single URL template; previously this URL was duplicated in three places.
    LIST_URL = ('http://www.ycsggzy.com/EpointWebBuilder/xyxxInfoListAction.action'
                '?cmd=getInfolist&categorynum=003&city=&title='
                'pageSize=20&pageIndex={page}'.replace('title=', 'title=&', 1))

    start_urls = [
        'http://www.ycsggzy.com/EpointWebBuilder/xyxxInfoListAction.action?cmd=getInfolist&categorynum=003&city=&title=&pageSize=20&pageIndex=0']

    def __init__(self, full_dose=False):
        """
            :param full_dose: whether to crawl the full history (是否全量爬取);
                defaults to False, in which case only rows dated today are
                followed. Scrapy passes ``-a`` arguments as strings, so
                "false"/"0"/"" are coerced to False here — previously any
                non-empty string (including "false") enabled a full crawl.
        """
        self.browser_cookie = {}
        self.page_count = 0
        self.convert_dict = convert_dict
        if isinstance(full_dose, str):
            full_dose = full_dose.strip().lower() not in ('', '0', 'false', 'no')
        self.full_dose = bool(full_dose)
        super().__init__()

    def json_loads(self, res):
        """Decode the double-encoded payload: the response body is JSON whose
        'custom' field is itself a JSON-encoded string.

        :param res: a response object exposing ``.text``
        :return: the decoded inner object (dict)
        """
        outer = json.loads(res.text)
        return json.loads(outer['custom'])

    def _list_url(self, page):
        """Return the list-API URL for the given 0-based page index."""
        return ('http://www.ycsggzy.com/EpointWebBuilder/xyxxInfoListAction.action'
                '?cmd=getInfolist&categorynum=003&city=&title=&pageSize=20'
                '&pageIndex={}'.format(page))

    def parse(self, response):
        """Read the total record count from the first response, compute the
        number of pages, and start paging from page 0."""
        total = int(self.json_loads(response)['totalcount'])
        # Ceiling division.  The old formula (total // 20 + 1) over-counted by
        # one page whenever total was an exact multiple of the page size,
        # causing requests for empty trailing pages.
        self.page_count = -(-total // self.PAGE_SIZE)
        yield scrapy.Request(
            self._list_url(0),
            callback=self.handle_next_html,
            meta={'page': 0, 'need_break': False},
        )

    def handle_next_html(self, response):
        """Parse one page of the announcement list.

        Yields a detail-page request per row.  In incremental mode
        (``full_dose`` falsy) paging stops once a row dated earlier than
        today is seen — rows are assumed newest-first (TODO confirm against
        the API ordering).
        """
        self.logger.debug(response.text)  # was a stray print() left from debugging
        rows = self.json_loads(response)['Table']
        need_break = response.meta['need_break']

        for row in rows:
            item = {'release_time': row['infodate']}
            if not self.full_dose and item['release_time'] != get_current_date():
                need_break = True
            else:
                item['origin_url'] = "http://www.ycsggzy.com/" + row['href']
                item['announcement_title'] = row['title']
                if '【' in item['announcement_title'] and '】' in item['announcement_title']:
                    # re.search, not re.match: the 【...】 tag is not always at
                    # the start of the title, and re.match would return None
                    # there, crashing on .group(1).
                    match = re.search(r"【(.*?)】", item['announcement_title'])
                    item['announcement_type'] = match.group(1) if match else 0
                else:
                    item['announcement_type'] = 0
                yield scrapy.Request(item['origin_url'], meta=item, callback=self.handle_response)

        if not need_break:
            page = response.meta['page']
            # Pages are 0-based; the last valid index is page_count - 1.
            if page + 1 < self.page_count:
                yield scrapy.Request(
                    self._list_url(page + 1),
                    callback=self.handle_next_html,
                    meta={'page': page + 1, 'need_break': False},
                )

    def handle_response(self, response):
        """Build the final item from the detail page plus the metadata
        carried on the request."""
        item = YanChengShiGongGongZiYuanJiaoYiWangItem()
        # item['announcement_type'] = response.request.meta['announcement_type']  # base item in spidertools not updated yet
        item['release_time'] = response.request.meta['release_time']
        item['origin_url'] = response.request.meta['origin_url']
        item['announcement_title'] = response.request.meta['announcement_title']
        item['html'] = response.text
        item['source_type'] = self.name_zh
        item['is_parsed'] = 0  # downstream parser flag: raw HTML not yet extracted
        item['province'] = self.province
        item['city'] = self.city
        yield item
