import re

import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.jiangsu.items import JiangSuZhengFuCaiGouWangItem


class JiangSuZhengFuCaiGouWangSpider(scrapy.Spider):
    """
        江苏政府采购网    http://www.ccgp-jiangsu.gov.cn/

        Spider for the Jiangsu Government Procurement website.  It walks the
        announcement-category links on the landing page, fans out over the
        per-city tabs, follows the JS-driven pagination, and yields one
        ``JiangSuZhengFuCaiGouWangItem`` per announcement detail page.
    """
    name = 'JiangSuZhengFuCaiGouWang'
    name_zh = "江苏政府采购网"
    province = "江苏"
    city = ''
    allowed_domains = ['ccgp-jiangsu.gov.cn']

    start_urls = ['http://www.ccgp-jiangsu.gov.cn/ggxx/zgysgg/']
    # not_full_type:
    #   True  -> only follow the tender-invitation and bid-winning categories
    #            ("公开招标公告", "邀请招标公告", "中标公告"), see parse()
    #   False -> follow every announcement category

    def __init__(self, full_dose=False, not_full_type=False, *args, **kwargs):
        """
        :param full_dose: when truthy, crawl the full history back to the
            2015-12-31 cut-off; otherwise only announcements published on the
            current date (incremental mode).  String values such as
            ``"false"``/``"0"`` (as passed by ``scrapy crawl -a``) are
            interpreted as False.
        :param not_full_type: when truthy, restrict the crawl to the tender
            and award announcement categories.  Same string coercion applies.

        Extra positional/keyword arguments are forwarded to
        ``scrapy.Spider.__init__`` as the framework expects.
        """
        super().__init__(*args, **kwargs)
        self.browser_cookie = {}
        self.convert_dict = convert_dict
        # Coerce so that CLI flags like -a full_dose=false do not arrive as a
        # truthy non-empty string and silently enable full-history crawling.
        self.full_dose = self._coerce_bool(full_dose)
        self.not_full_type = self._coerce_bool(not_full_type)

    @staticmethod
    def _coerce_bool(value):
        """Interpret spider arguments (bool or CLI string) as a boolean."""
        if isinstance(value, str):
            return value.strip().lower() not in ("", "false", "0", "no", "none")
        return bool(value)

    def xpath_helper(self, response, xpath):
        """Extract the text of every node matched by *xpath*, dropping the
        first entry (presumably a header/"all" element — TODO confirm)."""
        nodes = response.xpath(xpath)
        return [node.xpath('./text()').extract_first() for node in nodes][1:]

    def parse(self, response):
        """Follow each announcement-category link on the landing page."""
        for anchor in response.xpath('//div[@class="list_con"]/ul/li/a'):
            href = "http://www.ccgp-jiangsu.gov.cn/ggxx/zgysgg/" + anchor.xpath('./@href').extract_first()
            construction_type = anchor.xpath('./text()').extract_first()
            # Optional restriction to tender/award announcements only.
            if self.not_full_type and construction_type not in ["公开招标公告", '邀请招标公告', '中标公告']:
                continue
            if construction_type == "合同公告":
                # Contract announcements have no per-city tabs: go straight
                # to the listing parser with an empty city.
                yield scrapy.Request(
                    url=href,
                    callback=self.handle_response2,
                    meta={
                        'city': "",
                        'construction_type': construction_type,
                        'page': 0,
                        'need_break': False,
                    },
                )
            else:
                yield scrapy.Request(
                    url=href,
                    callback=self.handle_response,
                    meta={
                        "construction_type": construction_type,
                    }
                )

    def handle_response(self, response):
        """Fan out over the per-city tabs of one announcement category."""
        for tab in response.xpath('//div[@class="tab tabline"]//ul/li//a'):
            city_name = tab.xpath("./text()").extract()[0]
            if city_name == "全部":
                # The "all" tab duplicates the per-city listings; skip it.
                continue
            yield scrapy.Request(
                url=response.url + tab.xpath("./@href").extract_first(),
                callback=self.handle_response2,
                meta={
                    'city': city_name,
                    'construction_type': response.meta['construction_type'],
                    'page': 0,
                    'need_break': False,
                },
            )

    def handle_response2(self, response):
        """Parse one listing page: yield a detail request per row, then the
        next listing page unless a stop condition was hit."""
        meta = response.meta
        if "page_count" not in meta:
            # The total page count is only embedded in the first page's JS
            # pager call; cache it in meta for the follow-up requests.  When
            # the pager is absent, treat the listing as a single page instead
            # of raising IndexError.
            matches = re.findall(r'createPageHTML\((\d+?), 0, "index", "html"\);', response.text)
            meta['page_count'] = int(matches[0]) - 1 if matches else 0

        need_break = meta['need_break']
        for row in response.xpath('//div[@class="list_list"]/ul/li'):
            item = {}
            item['release_time'] = row.xpath('./span/text()').extract_first()
            if not self.full_dose and item['release_time'] != get_current_date():
                # Incremental mode: anything not published today ends pagination.
                need_break = True
            elif item['release_time'] is None or item['release_time'] < "2015-12-31":
                # Full mode: stop at the archive cut-off.  A missing date
                # (None) would otherwise raise TypeError on the comparison;
                # treat it as a stop marker like the incremental branch does.
                need_break = True
            else:
                url_end = row.xpath("./a/@href").extract_first()
                if meta['construction_type'] != "合同公告" and "index_" in response.url:
                    # Paginated listing URLs end in index_N.html: replace that
                    # last path segment with the detail href.
                    item['origin_url'] = "/".join(str(response.url).split('/')[0:-1]) + "/" + url_end
                else:
                    item['origin_url'] = response.url + url_end
                item['announcement_title'] = row.xpath("./a/text()").extract_first()
                item['construction_type'] = meta['construction_type']
                item['city'] = meta['city']
                yield scrapy.Request(
                    url=item['origin_url'],
                    callback=self.parse_html,
                    meta=item,
                )

        page = meta['page']
        page_count = meta['page_count']
        if not need_break and page < page_count:
            page += 1
            if "index_" in response.url:
                url = "/".join(str(response.url).split('/')[0:-1]) + '/index_' + str(page) + '.html'
            else:
                url = response.url + 'index_' + str(page) + '.html'
            yield scrapy.Request(
                url=url,
                callback=self.handle_response2,
                meta={
                    'city': meta['city'],
                    'construction_type': meta['construction_type'],
                    'page': page,
                    'page_count': page_count,
                    'need_break': False,
                },
                dont_filter=True,
            )

    def parse_html(self, response):
        """Wrap one announcement detail page into the item for the pipeline."""
        item = JiangSuZhengFuCaiGouWangItem()
        item['html'] = response.text
        item["announcement_title"] = response.meta['announcement_title']
        item["release_time"] = response.meta["release_time"]
        item['origin_url'] = response.meta['origin_url']
        item["construction_type"] = response.meta['construction_type']
        # Raw page stored unparsed; downstream parsing flips this flag.
        item['is_parsed'] = 0
        item['source_type'] = self.name_zh
        item['province'] = self.province

        item['project_city'] = response.meta['city']
        yield item
