import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.beijing.items import BeiJingShiZhengFuCaiGouWangItem
from commonresources.spiders.basespider import BaseSpider


class BeiJingShiZhengFuCaiGouWangSpider(BaseSpider):
    """Spider for the Beijing Municipal Government Procurement site.

    北京市政府采购网  http://www.ccgp-beijing.gov.cn/

    Crawls the announcement index ("xxgg") categories, follows their
    pagination, and yields one request per announcement detail page.
    In incremental mode (``full_dose`` falsy) only announcements
    released today (per ``get_current_date()``) are followed.
    """
    name = 'BeiJingShiZhengFuCaiGouWang'
    name_zh = "北京市政府采购网"
    province = "北京"
    allowed_domains = ['ccgp-beijing.gov.cn']

    start_urls = ['http://www.ccgp-beijing.gov.cn/xxgg/index.html']

    def __init__(self, full_dose=False):
        # full_dose: crawl the whole history when truthy, otherwise only
        # today's announcements (handled by BaseSpider / handle_response).
        super(BeiJingShiZhengFuCaiGouWangSpider, self).__init__(full_dose)

    def parse(self, response):
        """Extract the per-category index URLs from the landing page.

        Each matching ``<li>`` carries the relative index path in its
        ``@id`` attribute and the category label as its text.
        """
        for obj in response.xpath('//ul/li[contains(@id,"gg/index.html")]'):
            rel_path = obj.xpath('./@id').extract_first()
            if not rel_path:
                # Defensive: skip nodes without the expected @id path
                # instead of raising TypeError on the concatenation below.
                continue
            href = "http://www.ccgp-beijing.gov.cn/xxgg/" + rel_path
            # "sj" in the path marks municipal-level ("市级") announcements;
            # everything else is district-level ("区级").
            info_type = "市级" if "sj" in href else "区级"
            construction_type = obj.xpath('./text()').extract_first()
            yield scrapy.Request(
                url=href,
                callback=self.handle_response,
                headers=self.fake_headers(),
                dont_filter=True,
                errback=self.handle_error,
                meta={
                    "need_break": False,
                    "page": 0,
                    "info_type": info_type,
                    "construction_type": construction_type,
                    # Base URL without the trailing ".html"; pagination
                    # appends "_<page>.html" to it in handle_response.
                    "o_url": href[0:-5],
                },
            )

    def handle_error(self, failure):
        """Errback: log a failed request together with its crawl context.

        A 404 on the next paginated index page is how this spider finds
        the last page, so the failure is informational, not fatal.
        """
        meta = failure.request.meta
        self.logger.info(failure)
        self.logger.info(
            f"本条目已结束，信息如下:url:{failure.request.url},construction_type:{meta['construction_type']},info_type：{meta['info_type']}，总页数：{meta['page'] + 1}")

    def fake_headers(self):
        """Return a browser-like User-Agent to avoid trivial UA blocking."""
        return {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3823.400 QQBrowser/10.7.4307.400"
        }

    def handle_response(self, response):
        """Parse one index page: yield detail requests, then the next page.

        Pagination stops when the page lacks the "页" pager marker, or
        when an incremental crawl meets an announcement whose release
        date is not today (``need_break``). Items dated today that
        precede the stale one on the same page are still yielded.
        """
        if "页" not in response.text:
            return
        for obj in response.xpath('//ul[@class="xinxi_ul"]/li'):
            item = dict()
            item['release_time'] = obj.xpath('./span/text()').extract_first()
            if not self.full_dose and item['release_time'] != get_current_date():
                # Incremental crawl reached an older announcement:
                # finish this page but do not request the next one.
                response.meta['need_break'] = True
                continue
            href = obj.xpath('./a/@href').extract_first()
            if not href:
                # Defensive: a pager/filler <li> without a link.
                continue
            item['announcement_title'] = obj.xpath('./a/text()').extract_first()
            # Resolve the (usually relative) link against the current
            # index page instead of hand-splitting response.url.
            item['origin_url'] = response.urljoin(href)
            item['announcement_type'] = response.meta['construction_type']
            # item['info_type'] = response.meta['info_type']  # optional, currently unused
            item['item'] = BeiJingShiZhengFuCaiGouWangItem()
            yield scrapy.Request(url=item['origin_url'],
                                 callback=self.parse_item,
                                 headers=self.fake_headers(),
                                 meta=item,
                                 dont_filter=True,
                                 )
        if not response.meta['need_break']:
            page = response.meta['page'] + 1
            yield scrapy.Request(
                url=response.meta['o_url'] + f"_{page}.html",
                callback=self.handle_response,
                headers=self.fake_headers(),
                dont_filter=True,
                errback=self.handle_error,
                meta={
                    "need_break": False,
                    "page": page,
                    "info_type": response.meta['info_type'],
                    "construction_type": response.meta['construction_type'],
                    "o_url": response.meta['o_url'],
                },
            )