# -*- coding: utf-8 -*-
import json
import math

import scrapy
import re
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item, urllib, time, urlencode, datetime
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class SichuanAbazhouPolicy(BaseTaxPolicySpider):
    """Spider for policy documents published by county governments of
    Aba prefecture, Sichuan.

    Every listed site runs the same CMS, so one spider drives them all
    through two JSON endpoints:

    1. ``/interface-cms/countMetadataValueUnboundCodeTable`` — maps the
       issuing-agency name (``fwjg``) to the channel list;
    2. ``/interface-cms/qryManuscriptByFwjg`` — paginated manuscript
       (document) listing for one channel.
    """

    name = 'sichuan_abazhou_gk_policy'

    province = '四川省'
    city = '阿坝州羌族自治州'
    # county = '滑县'
    park = ''

    # Headers used for the CMS JSON API POST requests.
    headers = {
        'accept': 'application/json, text/plain, */*',
        'accept-language': 'zh,en;q=0.9,zh-CN;q=0.8',
        'cache-control': 'no-cache',
        'content-type': 'application/json',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
    }

    @classmethod
    def update_settings(cls, settings) -> None:
        """Apply spider-specific settings on top of ``custom_settings``.

        Raises the download timeout (these government sites are slow)
        and forces JSON-friendly default request headers.
        """
        new_settings = {
            **(cls.custom_settings or {}),
            "DOWNLOAD_TIMEOUT": 200,
            'DEFAULT_REQUEST_HEADERS': {
                'Accept': 'application/json, text/plain, */*',
                'Accept-Language': 'zh,en;q=0.9,zh-CN;q=0.8',
                'Content-Type': 'application/json'
            },
        }
        settings.setdict(new_settings, priority="spider")

    def start_requests(self):
        """Seed one request per (still enabled) government site.

        Each row: human-readable source name, county, the ``gk.shtml``
        entry URL (carries the URL-encoded issuing agency in ``fwjg=``),
        and the breadcrumb path whose last segment names the channel.
        """
        for source, county, url, path in [
            # Disabled sources kept for reference:
            # ['阿坝州羌族自治州财政局', "", "https://czj.abazhou.gov.cn/abzczj/zfxxgk/gk.shtml?url=http://czj.abazhou.gov.cn/abzczj&fwjg=%E5%B7%9E%E8%B4%A2%E6%94%BF%E5%B1%80", "首页>政务公开>法定主动公开内容>政策文件"],
            # ['阿坝州羌族自治州壤塘县人民政府', "壤塘县", "https://rangtang.gov.cn/xtxrmzf/zfxxgk/gk.shtml?url=http://rangtang.gov.cn/xtxrmzf&fwjg=%E5%A3%A4%E5%A1%98%E5%8E%BF%E6%94%BF%E5%BA%9C%E5%8A%9E&deptId=cbb218989b264a9190e5c69c8ad802a0", "首页>政务公开>政府信息公开>法定主动公开内容>行政规范性文件"],
            # ['阿坝州羌族自治州汶川县人民政府', "汶川县", "https://www.wenchuan.gov.cn/wcxrmzf/zfxxgk/gk.shtml?url=http://www.wenchuan.gov.cn/wcxrmzf&fwjg=%E6%B1%B6%E5%B7%9D%E5%8E%BF%E6%94%BF%E5%BA%9C%E5%8A%9E&deptId=18fdb1cd21cf44d3bfb7ce5e02610675", "首页>政务公开>政府信息公开>法定主动公开内容>行政规范性文件"],
            # ['阿坝州羌族自治州理县人民政府', "理县", "https://www.ablixian.gov.cn/lxrmzf/zfxxgk/gk.shtml?url=http://www.ablixian.gov.cn/lxrmzf&fwjg=%E7%90%86%E5%8E%BF%E6%94%BF%E5%BA%9C%E5%8A%9E&deptId=3aa16490c6d74a5d8afc559f2d9ede33", "首页>政务公开>政府信息公开>法定主动公开内容>行政规范性文件"],
            ['阿坝州羌族自治州茂县人民政府', "茂县", "https://www.maoxian.gov.cn/mxrmzf/zfxxgk/gk.shtml?url=http://www.maoxian.gov.cn/mxrmzf&fwjg=%E8%8C%82%E5%8E%BF%E6%94%BF%E5%BA%9C%E5%8A%9E&deptId=7460ec3a000c4739bee55f46e224b28c", "首页>政务公开>政府信息公开>法定主动公开内容>行政规范性文件"],
            ['阿坝州羌族自治州松潘县人民政府', "松潘县", "https://www.songpan.gov.cn/spxrmzf/zfxxgk/gk.shtml?url=http://www.songpan.gov.cn/spxrmzf&fwjg=%E6%9D%BE%E6%BD%98%E5%8E%BF%E6%94%BF%E5%BA%9C%E5%8A%9E&deptId=bd55abecbd974f418c760cb32c832200", "首页>政务公开>政府信息公开>法定主动公开内容>行政规范性文件"],
            ['阿坝州羌族自治州九寨沟县人民政府', "九寨沟县", "https://www.jzg.gov.cn/jzgrmzf/zfxxgk/gk.shtml?url=http://www.jzg.gov.cn/jzgrmzf&fwjg=%E4%B9%9D%E5%AF%A8%E6%B2%9F%E5%8E%BF%E6%94%BF%E5%BA%9C%E5%8A%9E&deptId=99aee2c5f76841d697a17218cf673eba", "首页>政务公开>政府信息公开>法定主动公开内容>行政规范性文件"],
            ['阿坝州羌族自治州黑水县人民政府', "黑水县", "https://www.heishui.gov.cn/hsxrmzf/zfxxgk/gk.shtml?url=http://www.heishui.gov.cn/hsxrmzf&fwjg=%E9%BB%91%E6%B0%B4%E5%8E%BF%E6%94%BF%E5%BA%9C%E5%8A%9E&deptId=5f1e29cc458848ef928d8b90faee7d30", "首页>政务公开>政府信息公开>法定主动公开内容>行政规范性文件"],
        ]:
            item = {'source': source, 'county': county}
            # Only the last breadcrumb segment (the channel name) is needed.
            yield scrapy.Request(url, callback=self.parse_content,
                                 meta={'item': item, 'path': path.split(">")[-1]},
                                 dont_filter=True)

    def parse_content(self, response, **kwargs):
        """Extract the issuing agency and website id from the entry page,
        then POST to the channel-listing endpoint."""
        meta = response.meta
        # fwjg is URL-encoded in the query string of the entry URL.
        fwjg = response.url.split('fwjg=')[-1].split("&")[0]
        fwjg = urllib.parse.unquote(fwjg)
        websiteId = response.xpath("//meta[@name='website']/@websiteid").get()

        data = {
            "metadataShort": "fawjg",
            "metadataValue": fwjg,
            "websiteId": websiteId,
        }
        meta['fwjg'] = fwjg
        # Compact separators: the endpoint expects a JSON body without spaces.
        body = json.dumps(data, separators=(',', ':'))
        yield scrapy.Request(response.urljoin('/interface-cms/countMetadataValueUnboundCodeTable'),
                             method='POST', callback=self.parse_js, body=body,
                             meta=meta, headers=self.headers)

    def parse_js(self, response, **kwargs):
        """Find the channel id matching the breadcrumb leaf and request the
        first page (size 50) of the manuscript listing."""
        meta = response.meta
        path = meta['path']
        fwjg = meta['fwjg']
        chanid = None
        for res in response.json()['result']:
            # Keep the last match, as the original last-assignment-wins loop did.
            if res['channelName'] == path:
                chanid = res['channelId']
        if chanid is None:
            # Previously this fell through to an unbound-variable NameError.
            self.logger.error("channel %r not found at %s", path, response.url)
            return
        self.logger.debug("%s channelId=%s", response.url, chanid)
        data = {
            "channelId": [chanid],
            "fwjg": fwjg,
            "pageNum": 1,
            "pageSize": 50
        }
        meta['data'] = data
        body = json.dumps(data, separators=(',', ':'))
        yield scrapy.Request(response.urljoin('/interface-cms/qryManuscriptByFwjg'),
                             method='POST', callback=self.parse_list, body=body,
                             meta=meta, headers=self.headers)

    def parse_list(self, response, **kwargs):
        """Yield a detail request per listed document; on the first page
        also fan out requests for every remaining page."""
        meta = response.meta
        data = meta['data']
        prev_item = meta.get('item')
        json_datas = response.json()
        for elem in json_datas['results']:
            url = elem['url']
            if not url:
                # Skip entries with no detail link.  The original checked
                # after urljoin(), which never yields a falsy value, so the
                # guard could never fire.
                continue
            item = Item()
            item['title'] = elem['title']
            item['publish_date'] = elem['publishedTime']
            item['source_url'] = response.urljoin(url)
            if prev_item is not None:
                # Carry over source/county metadata from start_requests.
                for key, value in prev_item.items():
                    item[key] = value
            yield response.follow(item['source_url'], callback=self.parse_detail,
                                  meta={'item': item})
        if meta.get("is_next") is not False:
            # Only the first response schedules pagination; descendants are
            # marked is_next=False so they don't re-schedule it.
            total_page = json_datas['totalPage']
            self.logger.info("%s total pages=%s count=%s",
                             meta['fwjg'], total_page, json_datas['totalCount'])
            meta.update({'item': prev_item, 'is_next': False})
            for page_num in range(2, int(total_page) + 1):
                data['pageNum'] = page_num
                body = json.dumps(data, separators=(',', ':'))
                yield scrapy.Request(response.url, method='POST',
                                     callback=self.parse_list, body=body,
                                     meta=meta, headers=self.headers)

    def parse_detail(self, response, **kwargs):
        """Attach the page content and location fields, then emit the item."""
        item = Item() if response.meta.get('item') is None else response.meta.get('item')
        # xpath('.') serializes the whole response document; presumably a
        # downstream pipeline extracts the article body — TODO confirm.
        item['content'] = response.xpath(""".""").get()
        item['province'] = self.province
        item['city'] = self.city
        # item['county'] = self.county  # county comes from start_requests metadata
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Convenience entry point: launch this spider directly from the file.
    cmdline.execute(["scrapy", "crawl", "sichuan_abazhou_gk_policy"])
