#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : config_spider
import scrapy
import math

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item, urlencode, urllib
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class Gansuwuwei_numPolicy(BaseTaxPolicySpider):
    """Crawl tax-policy articles from several bureau columns on the Wuwei
    (Gansu) municipal government portal (www.gswuwei.gov.cn).

    Pipeline:
        start_requests -> parse_content (resolve the iframe-backed xxgk
        search endpoint) -> parse_list (walk every result page, one detail
        request per row) -> parse_detail (fill and yield the Item).
    """

    name = 'gansu_wuwei_num2_policy'

    province = '甘肃省'
    city: str = "武威市"  # taken from the listing table
    park: str = ""  # taken from the listing table
    page_size: int = 45
    url = "https://www.gswuwei.gov.cn/col/wuwei_num/index.html"

    @classmethod
    def update_settings(cls, settings) -> None:
        """Merge this spider's middleware stack and timeout into the crawler
        settings at 'spider' priority, on top of any ``custom_settings``."""
        new_settings = {
            **(cls.custom_settings or {}),
            "DOWNLOADER_MIDDLEWARES": {
                # None disables the stock retry middleware; the private
                # Rs6RetryMiddleware below takes over retries.
                "scrapy.downloadermiddlewares.retry.RetryMiddleware": None,
                "components.middlewares.downloadmiddlewares.public.useragent_random.RandomUserAgentMiddleware": 550,
                "components.middlewares.downloadmiddlewares.public.company_ip_by_api.CompanyIpByApiMiddleware": 551,
                "components.middlewares.downloadmiddlewares.public.error_collector.ErrorCollectorMiddleware": 552,
                'components.middlewares.downloadmiddlewares.private.rs_6_middlewares.Rs6RetryMiddleware': 553,
            },
            # Portal is slow behind the anti-bot layer; allow long downloads.
            "DOWNLOAD_TIMEOUT": 200,
        }
        settings.setdict(new_settings, priority="spider")

    def start_requests(self):
        """Yield one request per source bureau column page.

        ``source`` / ``county`` ride along in ``meta['item']`` and are merged
        into every Item produced downstream.
        """
        sources = [
            ("武威市交通运输局", "", "https://www.gswuwei.gov.cn/col/col846/index.html"),
            ("武威市财政局", "", "https://www.gswuwei.gov.cn/col/col843/index.html?number="),
            ("武威市商务局", "", "https://www.gswuwei.gov.cn/col/col848/index.html"),
            ("武威市市场监督管理局", "", "https://www.gswuwei.gov.cn/col/col864/index.html"),
            ("武威市农业农村局", "", "https://www.gswuwei.gov.cn/col/col858/index.html"),
            ("武威市文化广播电视和旅游局", "", "https://www.gswuwei.gov.cn/col/col860/index.html"),
        ]
        for source, county, url in sources:
            yield scrapy.Request(
                url,
                callback=self.parse_content,
                meta={"item": {'source': source, 'county': county}},
            )

    def parse_content(self, response):
        """Resolve the xxgk search endpoint embedded in the column page's
        iframe and POST the first list query to it."""
        meta = response.meta
        # The column page embeds the real list in an iframe whose src carries
        # the `area` and `divid` query parameters we must replay.
        area = response.xpath('//iframe').re_first(r"""area=[%20]*(\s*.*?)&""")
        divid = response.xpath('//iframe').re_first(r'divid=(.*?)"')
        url = response.urljoin('/module/xxgk/search.jsp')
        params = {'infotypeId': '', 'divid': divid, 'area': area, 'jdid': '1'}
        data = {'area': area}
        headers = {"Referer": response.url} if response.url else {}
        if not url.endswith("?"):
            url += "?"
        temp_url = url + urllib.parse.urlencode(params)
        # Stash everything parse_list needs to build page-N requests.
        meta.update({
            "data": data,
            "params": params,
            "url": url,
            "headers": headers,
        })
        yield self.FormRequest(
            temp_url,
            formdata=data,
            method="POST",
            callback=self.parse_list,
            meta=meta,
        )

    def parse_list(self, response, **kwargs):
        """Yield one detail request per article row; from the first page only,
        fan out one request per remaining result page."""
        prev_item = response.meta.get('item')
        meta = response.meta
        url = meta['url']
        for elem in response.xpath("//tr"):
            item = Item()
            item['source_url'] = elem.xpath(".//a[contains(@href, 'htm')]/@href").get()
            if not item['source_url']:
                continue  # header/spacer row without an article link
            item['source_url'] = response.urljoin(item['source_url'])
            if prev_item is not None:
                for key, value in prev_item.items():
                    item[key] = value
            # Fixed: was a bare print(); route through the spider logger.
            self.logger.debug("%s -> %s", response.url, item['source_url'])
            yield response.follow(item['source_url'], callback=self.parse_detail, meta={'item': item})
        # Fixed: int(re_first(...)) raised TypeError when the pager text was
        # missing; fall back to a single page.
        total_text = response.xpath("string(.)").re_first(r"共\s*(\d+)\s*页")
        total_page = int(total_text) if total_text else 1
        # is_next=False marks page-N responses so they never paginate again.
        if meta.get("is_next") is not False:
            for i in range(2, total_page + 1):
                # Fixed: copy params/meta per request. The old code mutated a
                # single shared dict while yielding, so every queued request's
                # meta ended up carrying the last page's values.
                params = {**meta['params'], 'currpage': i}
                page_meta = {**meta, 'is_next': False, 'params': params, 'page': i}
                temp_url = url + urllib.parse.urlencode(params)
                self.logger.info(f"{total_page} 开始爬取 第 {i} 页 params: {params}")
                yield self.FormRequest(
                    temp_url,
                    formdata=meta['data'],
                    method="POST",
                    meta=page_meta,
                    headers=meta['headers'],
                    callback=self.parse_list,
                )

    def parse_detail(self, response, **kwargs):
        """Extract title / publish date / content from an article page and
        yield the completed Item."""
        item = response.meta.get('item')
        if item is None:
            item = Item()
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        item['publish_date'] = (
            response.xpath("""string(//meta[@name='PubDate']/@content)""").get()
            or response.xpath("//*[@class='fbrq']/text()").get()
        )
        if item['publish_date']:
            # Keep only the date part of "YYYY-MM-DD HH:MM:SS".
            item['publish_date'] = item['publish_date'].split(" ")[0]
        # NOTE(review): xpath(".") serializes the entire document — presumably
        # the full page HTML is stored as content on purpose; confirm.
        item['content'] = response.xpath(""".""").get()
        item['province'] = self.province
        item['city'] = self.city
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Allow running this spider directly for local debugging.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "gansu_wuwei_num2_policy"])
