import datetime
from typing import Iterable, Any

import scrapy
from scrapy import Request
from scrapy.http import Response

from apps.tax_policy_declaration.tax_policy_declaration.items import NetTaxPolicyDeclarationItem
from utils.tools import urlencode, parse_url_params, to_date


class ZjZwfwTaxPolicyDeclarationSpider(scrapy.Spider):
    """Tax-policy declaration spider for the Zhejiang government service
    portal (浙江政务服务网).

    Queries the portal's policy-summary JSON API for two listing states —
    待开始 (showState=1) and 进行中 (showState=2) — follows the API's own
    pagination, and yields one ``NetTaxPolicyDeclarationItem`` per record.
    """

    name = "zj_zwfw_tax_policy_declaration"
    source = "浙江政务服务网"  # data-source label stamped on every item
    province = "浙江省"

    # Listing API endpoint; state and page are selected via query params.
    LIST_URL = "https://hqpt.jxt.zj.gov.cn/gateway/provider-zcgj-ent/elastic/policy/summary"

    @staticmethod
    def _list_params(show_state: str) -> dict:
        """Return the query-parameter dict for one listing state.

        The original code duplicated this 11-key dict verbatim for each
        state; only ``showState`` actually varies ("1" = 待开始,
        "2" = 进行中). Key order matches the portal UI's requests.
        """
        return {
            "policyCategory": "",
            "areaCode": "",
            "departmentId": "",
            "keywords": "",
            "showState": show_state,
            "sortColumn": "publishDate",
            "declare": "true",
            "requestData": "true",
            "highLight": "true",
            "pageNo": "1",
            "pageSize": "10",
        }

    def start_requests(self) -> Iterable[Request]:
        """Yield the first-page listing request for each of the two states."""
        for show_state in ("1", "2"):  # "1": 待开始, "2": 进行中
            params = self._list_params(show_state)
            yield Request(url=self.LIST_URL + "?" + urlencode(params), callback=self.parse_list)

    def parse_list(self, response, **kwargs):
        """Parse one listing page, then request the next page if any remain.

        Record extraction is delegated to :meth:`parse`; pagination state is
        carried entirely in the request URL's ``pageNo`` query parameter.
        """
        yield from self.parse(response, **kwargs)
        root_url, params = parse_url_params(response.request.url)
        resp = response.json()
        pages = resp["result"]["page"]["pages"]  # total page count reported by the API
        current_page = int(params["pageNo"])
        if current_page < pages:
            yield Request(
                url=root_url + "?" + urlencode({**params, "pageNo": str(current_page + 1)}),
                callback=self.parse_list,
            )

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Yield a ``NetTaxPolicyDeclarationItem`` for every record on the page."""
        resp = response.json()
        records = resp["result"]["page"]["records"]
        for record in records:
            declare_list = record.get("declareList")
            declaration_type = None
            ent_in_process = None  # handling process text, HTML tags stripped
            slsj = None  # processing time (受理时间)
            sbcl = None  # application materials (申报材料)
            valid = 1  # 1 = not yet expired, 0 = past expiryDate
            if declare_list:
                first = declare_list[0]
                raw_process = first.get("entInProcess")
                # Only the two well-known fast-track labels count as a
                # declaration type; any other value is free-form HTML text.
                declaration_type = raw_process if raw_process in ("免申即享", "即申即享") else None
                # Strip HTML markup from the process description via an
                # XPath string() projection over the wrapped fragment.
                ent_in_process = (
                    scrapy.Selector(text=f"<html>{raw_process}</html>").xpath("string(.)").get()
                )
                slsj = first.get("slsj")
                sbcl = first.get("sbcl")
            if record.get("expiryDate"):
                # NOTE(review): naive datetime comparison — assumes to_date
                # returns a naive local-time datetime; confirm in utils.tools.
                if to_date(record.get("expiryDate"), date_format="%Y-%m-%d") < datetime.datetime.now():
                    valid = 0
            # Administrative level: county overrides city overrides province.
            level = "省级"
            if record.get("cityName"):
                level = "市级"
            if record.get("countyName"):
                level = "区级"
            item = NetTaxPolicyDeclarationItem(
                **{
                    "source": self.source,
                    "province": self.province,
                    "city": record.get("cityName"),  # city
                    "county": record.get("countyName"),  # county/district
                    "declaration_type": declaration_type,  # declaration type
                    "page_view": record.get("clickQuantity"),  # view count
                    "project_name": record.get("policyTitle") or record.get("title"),  # project name
                    "enjoyment_subject": record.get("applyType"),  # eligible subject
                    "enjoyment_condition": None,  # declaration conditions (not provided by API)
                    "doc_number": record.get("code"),
                    "release_time": record.get("publishDate"),
                    "department": record.get("department"),
                    "policy_theme": record.get("policyCategoryText"),
                    "policy_basis": record.get("titleHighLight"),
                    # NOTE(review): "policy_sunnary" is the field name declared
                    # on NetTaxPolicyDeclarationItem — do not "fix" the typo here.
                    "policy_sunnary": record.get("contentHighLight"),
                    "max_support_amount": record.get("amount"),  # maximum support amount
                    "content": record.get("interContent"),
                    # NOTE(review): raises TypeError if record["id"] is missing;
                    # presumably every record has an id — confirm upstream.
                    "source_url": "https://mapi.zjzwfw.gov.cn/web/mgop/gov-open/zj/2002319217/reserved/index.html#/policy-list/detail?esId="
                    + record.get("id"),
                    "application_area": f"{self.province}"
                    + (f'-{record.get("cityName")}' if record.get("cityName") else "")
                    + (f'-{record.get("countyName")}' if record.get("countyName") else ""),
                    "administrative_level": level,  # administrative level
                    "policy_industry": record.get("belongIndustryText"),
                    "declaration_start_time": record.get("implementDate"),
                    "application_deadline": record.get("expiryDate"),
                    "application_materials": sbcl,  # materials required to declare
                    "handling_process": ent_in_process,  # handling process
                    "charging_situation": None,  # fees (not provided by API)
                    "processing_time": slsj,
                    "legal_time_limit": None,
                    "commitment_time_limit": None,
                    "declaration_address": None,  # offline declaration address
                    "consultant": None,  # contact person for inquiries
                    "hotline": record.get("contact"),
                    "complaints_hotline": None,
                    "valid": valid,
                }
            )
            yield item


if __name__ == "__main__":
    # Allow running this file directly; equivalent to invoking
    # `scrapy crawl zj_zwfw_tax_policy_declaration` from the shell.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "zj_zwfw_tax_policy_declaration"])
