import json
from typing import Iterable

import requests
import scrapy
from scrapy import Request
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from apps.tax_honor.tax_honor.base_clean_spider import BaseCleanMixin
from apps.tax_honor.tax_honor.items import NetTaxHonorResponseItem
from utils.tools import parse_url_params, unquote_url, urlencode


def get_area_code():
    """Fetch the Zhejiang area table and flatten it to ``{areaId: areaName}``.

    Queries the ``smePolicyLibrary/findAreas`` endpoint and returns a dict
    mapping the stringified ``areaId`` of every top-level area — and of each
    child area whose ``govCode`` is non-zero — to its ``areaName``.

    Returns:
        dict[str, str]: area id (as string) -> area name.

    Raises:
        requests.HTTPError: if the endpoint answers with an error status.
    """
    url = "https://hqpt.jxt.zj.gov.cn/gateway/provider-zcgj-ent/smePolicyLibrary/findAreas"
    response = requests.get(
        url,
        headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.101.76 Safari/537.36"
        },
        # Without a timeout a stalled connection would hang start_requests forever.
        timeout=30,
    )
    # Fail loudly on HTTP errors instead of raising an opaque JSONDecodeError.
    response.raise_for_status()

    res = {}
    for area in response.json()["result"]:
        res[str(area["areaId"])] = area["areaName"]
        # `or []` also covers a present-but-None "children" field, which the
        # original `if "children" in d` check would have crashed on.
        for child in area.get("children") or []:
            # NOTE(review): govCode == 0 children appear to be placeholder rows
            # and are skipped — confirm against the API's actual payload.
            if child["govCode"] != 0:
                res[str(child["areaId"])] = child["areaName"]
    return res


class ZjZwfwTaxHonorSpider(scrapy.Spider, BaseCleanMixin):
    """Crawl tax-honor ("名单/公示") announcements from the Zhejiang SME policy APIs.

    For each search keyword, two JSON endpoints on hqpt.jxt.zj.gov.cn are
    queried: the attachment-oriented ``smePolicyLibrary/list`` API (handled by
    ``parse_file``) and the full-text ``elastic/policy/summary`` API (handled
    by ``parse_summary``).  Matching records are normalized into
    ``NetTaxHonorResponseItem`` dicts and handed to ``parse_detail``, which
    emits the item plus follow-up requests for the document page and any
    attachments (``parse_spider_detail`` and ``check_utils`` are provided by
    ``BaseCleanMixin`` — not visible in this file).
    """

    name = "zj_zwfw_tax_honor"
    # areaId -> areaName lookup, populated once in start_requests().
    area_code = {}
    # Public portal page, kept for reference; actual traffic goes to the JSON APIs.
    url = "https://mapi.zjzwfw.gov.cn/web/mgop/gov-open/zj/2002319217/reserved/index.html#/policy-list"
    # Search keywords: "list", "public notice", "list publication", "announcement".
    keyword = ["名单", "公示", "名单公示", "通告"]

    def start_requests(self) -> Iterable[Request]:
        """Seed page-1 search requests for every keyword against both APIs."""
        self.area_code = get_area_code()

        list_url = "https://hqpt.jxt.zj.gov.cn/gateway/provider-zcgj-ent/smePolicyLibrary/list"
        for keyword in self.keyword:
            params = {"cityid": "", "countyid": "", "deptId": "", "title": keyword, "pageNo": "1", "pageSize": "10"}
            yield Request(url=list_url + "?" + urlencode(params), callback=self.parse_file)

        summary_url = "https://hqpt.jxt.zj.gov.cn/gateway/provider-zcgj-ent/elastic/policy/summary"
        for keyword in self.keyword:
            params = {
                "policyCategory": "",
                "areaCode": "",
                "departmentId": "",
                "keywords": keyword,
                "showState": "",
                "sortColumn": "publishDate",
                "requestData": "true",
                "highLight": "true",
                "pageNo": "1",
                "pageSize": "10",
            }
            yield Request(url=summary_url + "?" + urlencode(params), callback=self.parse_summary)

    def _paginate(self, response, pages, callback):
        """Yield requests for pages 2..*pages* of the API that produced *response*.

        BUG FIX: the original passed the page number via ``meta={"page": ...}``,
        which never reaches the callback's ``**kwargs`` — ``kwargs.get("page")``
        was always None, so every paginated response tried to paginate again and
        only Scrapy's duplicate filter stopped the loop.  ``cb_kwargs`` delivers
        the page number as a real keyword argument.  Pagination also starts at 2
        now: page 1 was already fetched by the seeding request.
        """
        # URL parsing is invariant across pages, so do it once.
        root_url, params = parse_url_params(unquote_url(response.request.url))
        for next_page in range(2, pages + 1):
            params["pageNo"] = next_page
            yield Request(
                url=root_url + "?" + urlencode(params),
                callback=callback,
                cb_kwargs={"page": next_page},
            )

    def parse_file(self, response, **kwargs):
        """Handle one result page of the smePolicyLibrary/list API."""
        resp = response.json()
        page = kwargs.get("page")
        pages = resp["result"]["pages"]
        records = resp["result"]["records"]
        province = "浙江省"
        for record in records:
            # NOTE(review): assumes attachid_files is always a dict carrying
            # fileUrl; a record without an attachment would raise here — confirm
            # against the API's actual payload.
            file_url = record["attachid_files"]["fileUrl"]
            response_item = {
                "province": province,
                "city": self.area_code.get(str(record["cityid"])),
                "county": self.area_code.get(str(record["countyid"])),
                "document_title": record["title"],
                "publish_date": record["publishDate"],
                "decare_department": record["deptName"],
                # Normalize the API's "县（区）级" label to the project-wide "区级".
                "hierarchy": "区级" if record["hierarchy_dictText"] == "县（区）级" else record["hierarchy_dictText"],
                "document_url": file_url,
                "source": self.name,
                "item_count": 0,
                "response": None,
                "attachment_url": [file_url] if file_url else [],
                "deadline": record.get("expiryDate"),
            }
            if self.check_utils.check_title_is_valid(response_item["document_title"]):
                yield from self.parse_detail(response, response_item=response_item)

        # Only the seeding (page-less) response fans out the remaining pages.
        if page is None:
            yield from self._paginate(response, pages, self.parse_file)

    def parse_summary(self, response, **kwargs):
        """Handle one result page of the elastic/policy/summary API."""
        resp = response.json()
        page = kwargs.get("page")
        pages = resp["result"]["page"]["pages"]
        records = resp["result"]["page"]["records"]
        province = "浙江省"
        for record in records:
            response_item = {
                "province": province,
                "city": record["cityName"],
                "county": record["countyName"],
                # Fall back to the secondary field when the primary one is empty.
                "document_title": record["policyTitle"] or record["title"],
                "publish_date": record["publishDate"] or record["firstPassTime"],
                "decare_department": record["department"],
                # Normalize the API's "县（区）级" label to the project-wide "区级".
                "hierarchy": (
                    "区级" if record.get("hierarchy_dictText") == "县（区）级" else record.get("hierarchy_dictText")
                ),
                "document_url": f"https://mapi.zjzwfw.gov.cn/web/mgop/gov-open/zj/2002319217/reserved/index.html#/policy-list/detail?esId={record['id']}",
                "attachment_url": (
                    [record.get("policyOriginalFilePath")] if record.get("policyOriginalFilePath") else []
                ),
                "deadline": record.get("expiryDate"),
                "source": self.name,
                "item_count": 0,
                # Keep the raw record for downstream parsers.
                "response": json.dumps(record),
            }
            if self.check_utils.check_title_is_valid(response_item["document_title"]):
                yield from self.parse_detail(response, response_item=response_item)

        # Only the seeding (page-less) response fans out the remaining pages.
        if page is None:
            yield from self._paginate(response, pages, self.parse_summary)

    def parse_detail(self, response, **kwargs):
        """Emit the normalized item, then fetch its attachments and document page."""
        response_item = kwargs["response_item"]
        yield NetTaxHonorResponseItem(**response_item)
        for url in response_item["attachment_url"]:
            yield Request(
                url=url,
                callback=self.parse_spider_detail,
                cb_kwargs={"response_item": response_item},
            )
        yield Request(
            url=response_item["document_url"],
            callback=self.parse_spider_detail,
            cb_kwargs={"response_item": response_item},
        )


def process_run():
    """Run the crawl spider and its parser spider inside one Scrapy process."""
    spider_names = (
        "zj_zwfw_tax_honor",
        "zj_zwfw_tax_honor_parser",
    )
    process = CrawlerProcess(get_project_settings())
    for spider_name in spider_names:
        process.crawl(spider_name)
    process.start()


if __name__ == "__main__":
    from scrapy import cmdline

    # Launch the crawl spider only; use process_run() to also run the parser.
    cmdline.execute(["scrapy", "crawl", "zj_zwfw_tax_honor"])
    # process_run()
