import re

import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.hubei.items import HuBeiShengDianZiZhaoTouBiaoJiaoYiPingTaiItem
from commonresources.spiders.basespider import BaseSpider


class HuBeiShengDianZiZhaoTouBiaoJiaoYiPingTaiSpider(BaseSpider):
    """Spider for 湖北省电子招投标交易平台 (Hubei e-bidding platform).

    Entry point: http://www.hbbidcloud.cn/

    Crawl flow:
      start_requests -> handle_response (category menu)
                     -> handle_response_response (listing pages, paginated)
                     -> parse_item (detail pages, provided by BaseSpider).
    """
    name = 'HuBeiShengDianZiZhaoTouBiaoJiaoYiPingTai'
    name_zh = "湖北省电子招投标交易平台"
    province = "湖北"

    # start_urls = ['https://www.hbggzyfwpt.cn/jyxx/jsgcXmxx']

    # Single source of truth for the site root (previously repeated inline).
    BASE_URL = "http://www.hbbidcloud.cn"

    def __init__(self, full_dose=False):
        # full_dose=False means incremental mode: only today's entries are
        # followed (see handle_response_response).
        super().__init__(full_dose)

    def parse(self, response):
        # Required by scrapy.Spider's interface; unused because every request
        # carries an explicit callback.
        pass

    def fake_headers(self):
        """Return browser-like request headers to avoid trivial bot blocking."""
        return {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            # "Cookie": "td_cookie=2515577220",
            "Host": "www.hbbidcloud.cn",
            # "Referer": "http://www.hbbidcloud.cn/hubei/jyxx/004005/004005003/5.html",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36",
        }

    def start_requests(self):
        """Kick off the crawl from the category overview page."""
        yield scrapy.Request(
            url="http://www.hbbidcloud.cn/hubei/jyxx/about.html",
            headers=self.fake_headers(),
            callback=self.handle_response,
        )

    def handle_response(self, response):
        """Parse the left-hand category menu and schedule one listing request
        per (announcement_type, info_type) combination."""
        for menu_entry in response.xpath('//ul[@id="left-menu"]/li'):
            announcement_type = menu_entry.xpath('./h3/a/text()').extract_first()
            for option in menu_entry.xpath('./div/ul/li/a'):
                info_type = option.xpath('./text()').extract_first()
                # Hoisted: the href was previously extracted three times.
                href_path = option.xpath('./@href').extract_first()
                if not href_path:
                    # Malformed menu entry; nothing to follow.
                    continue
                href = self.BASE_URL + href_path
                parts = href_path.split('/')
                # Listing base URL = href without the trailing "<n>.html"
                # segment; used to build pagination URLs later.
                # FIX: the original prepended "http://www.hbbidcloud.cn/" to a
                # path that already starts with "/", yielding "…cn//hubei/…".
                o_url = self.BASE_URL + "/".join(parts[0:-1])
                # Second-to-last path segment is the category id used by the
                # query-string pagination endpoint (pages >= 101).
                categoryNum = parts[-2]
                yield scrapy.Request(
                    url=href,
                    headers=self.fake_headers(),
                    callback=self.handle_response_response,
                    meta={
                        "page": 1,
                        'o_url': o_url,
                        'categoryNum': categoryNum,
                        "page_count": -1,  # -1 = not yet computed
                        "announcement_type": announcement_type,
                        "info_type": info_type,
                        "need_break": False,
                    }
                )

    def handle_response_response(self, response):
        """Parse one listing page: yield a detail request per row, then the
        next listing page unless pagination should stop."""
        meta = response.meta
        if meta["page_count"] == -1:
            # First page of this category: derive total page count from the
            # inline JS pager config.
            try:
                page_size, total = re.findall(
                    r'pageSize: (\d*),\s+total: (\d*),', response.text)[0]
                page_size, total = int(page_size), int(total)
                # Ceiling division. The original `total // page_size + 1`
                # requested one empty extra page whenever total was an exact
                # multiple of page_size.
                meta["page_count"] = -(-total // page_size)
            except Exception:
                # Pager config absent -> treat as an empty category.
                meta["page_count"] = 0
                self.logger.warning('%s,本页无内容', response.url)

        for row in response.xpath('//ul[@class="wb-data-item"]/li'):
            item = dict()
            # NOTE(review): assumes every row has a <span> date — an
            # AttributeError here would match the original behavior.
            item['release_time'] = row.xpath('./span/text()').extract_first().strip()
            if not self.full_dose and item['release_time'] != get_current_date():
                # Incremental mode: an older entry means all following pages
                # are older too, so stop paginating after this page.
                meta['need_break'] = True
            else:
                item['announcement_title'] = row.xpath('./div/a/text()').extract_first().strip()
                item['origin_url'] = 'http://www.hbbidcloud.cn' + row.xpath('./div/a/@href').extract_first()
                item['announcement_type'] = meta['announcement_type']
                item['info_type'] = meta['info_type']
                if '【' in item['announcement_title']:
                    # Titles like "【武汉】…" carry the city between brackets.
                    item['city'] = re.findall(r'【(.*?)】', item['announcement_title'])[0]
                item['item'] = HuBeiShengDianZiZhaoTouBiaoJiaoYiPingTaiItem()
                yield scrapy.Request(url=item['origin_url'],
                                     callback=self.parse_item,
                                     meta=item,
                                     dont_filter=True,
                                     )

        if meta['page_count'] and not meta['need_break']:
            next_page = meta['page'] + 1
            if next_page <= meta['page_count']:
                # The site only serves static "<n>.html" listing pages up to
                # 100; deeper pages go through the query-string endpoint.
                if next_page >= 101:
                    url = (f"{meta['o_url']}/about.html?"
                           f"categoryNum={meta['categoryNum']}&pageIndex={next_page}")
                else:
                    url = f"{meta['o_url']}/{next_page}.html"
                yield scrapy.Request(
                    url=url,
                    headers=self.fake_headers(),
                    callback=self.handle_response_response,
                    meta={
                        "page": next_page,
                        'o_url': meta['o_url'],
                        'categoryNum': meta['categoryNum'],
                        "page_count": meta['page_count'],
                        "announcement_type": meta['announcement_type'],
                        "info_type": meta['info_type'],
                        "need_break": False,
                    }
                )
