#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/13 17:22
# @Author  : 王凯
# @File    : hubei_illegal.py
# @Project : spider-man
import re

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider
from utils.tools import urlencode


class HuBeiIllegalSpider(BaseTaxIllegalSpider):
    """Spider for major tax-violation cases published by the Hubei tax bureau.

    Pages through the e-tax JSON listing endpoint and yields one
    ``NetTaxIllegalProItem`` per published case record.
    """

    name = "hubei_illegal"
    province: str = "湖北"
    # Public landing page of the listing (kept for reference / the base class).
    url: str = "http://hubei.chinatax.gov.cn/hbsw/zdsswfaj.html"

    # JSON listing endpoint; shared by the first request and the pagination requests.
    LIST_URL: str = "https://etax.hubei.chinatax.gov.cn/webroot/gzcxAction.do"
    # Records per page served by the endpoint; must match the "limit" query param.
    PAGE_SIZE: int = 15

    # Remote JSON field -> item attribute. Built once at class level instead of
    # being rebuilt for every record inside the parse loop.
    FIELD_MAPPING = {
        "NSRMC": "company_name",
        "NSRSBH": "taxpayer_id",
        "ZZJG": "org_code",
        "ZCDZ": "address",
        "FDDBR": "legal_representative",
        "WFQJ_FDDBR": "illegal_legal_representative",
        "CWFZR": "resp_financial",
        "JJZRR": "resp_person",
        "ZZJGRY": "resp_intermediary",
        "AJXZ": "illegal_status",
        "DIS": "city",
    }

    # Compiled once instead of per record. NOTE: the noise pattern originally
    # used ``\s*`` which also matched the empty string at every position (a
    # no-op in sub); ``\s+`` produces identical output without the wasted work.
    _COMMENT_RE = re.compile(r"<!--.*?-->")
    _NOISE_RE = re.compile(r"<.*?>|\s+|“|”")
    _FACTS_RE = re.compile(r"(.*(?:主要存在|存在以下问题)(?:[^根]*?[。，;；])*)[^。]*?(?:根据|依照|依据)+")

    def start_requests(self):
        # Presumably establishes the e-tax session (cookies/handshake) before
        # the listing queries — TODO confirm against BaseTaxIllegalSpider.
        yield from self.etax_search(**{"etax_url": "https://etax.hubei.chinatax.gov.cn:8443"})
        params = {
            "method": "zdsswfajcx",
            "page": "1",
            "limit": str(self.PAGE_SIZE),
            "nsrsbh": "",
            "zzjgdm": "",
            "nsrmc": "",
            "fddbrmc": "",
            "fddbrzjh": "",
            "cwfzrmc": "",
            "cwfzrzjh": "",
            "nsrlx": "",
            "ds": "",
            "zcdz": "",
            "ajxz": "",
            "ssnd": "",
        }
        yield self.Request(
            self.LIST_URL + "?" + urlencode(params),
            callback=self.parse_list,
            cb_kwargs=dict(params=params),
        )

    def parse_list(self, response, **kwargs):
        """Read the total count from page 1, emit its items, then request the rest.

        Page 1 is consumed in-line via ``parse_detail``; pages 2..N are issued
        as follow-up requests with only the ``page`` param changed.
        """
        total_record = response.json().get("count", 1)
        if not (total_record and int(total_record) >= 1):
            return
        yield from self.parse_detail(response, **kwargs)
        # ceil(total / PAGE_SIZE) pages exist. The previous
        # ``range(2, total // 15 + 2)`` requested one extra (empty) page
        # whenever the total was an exact multiple of the page size.
        total_pages = -(-int(total_record) // self.PAGE_SIZE)
        for page in range(2, total_pages + 1):
            yield self.Request(
                self.LIST_URL + "?" + urlencode({**kwargs.get("params"), "page": str(page)}),
                callback=self.parse_detail,
            )

    def parse_detail(self, response, **kwargs):
        """Yield one item per case record in the response's ``data`` array."""
        for info in response.json().get("data", []):
            item = NetTaxIllegalProItem()
            for key, value in info.items():
                attr = self.FIELD_MAPPING.get(key)
                if attr:
                    setattr(item, attr, value)

            # Strip HTML comments, tags, whitespace and curly quotes from the
            # raw "illegal facts" HTML fragment.
            cleaned = self._NOISE_RE.sub("", self._COMMENT_RE.sub("", info.get("WFSS", "")))
            # The facts run from "主要存在/存在以下问题" up to the legal-basis
            # marker ("根据/依照/依据"); everything after is basis + punishment.
            match = self._FACTS_RE.search(cleaned)
            illegal_facts = match.group(1) if match else ""
            if illegal_facts:
                basis_and_punishment = cleaned.replace(illegal_facts, "")
            else:
                # No basis marker found: treat the whole text as the facts.
                illegal_facts = cleaned
                basis_and_punishment = ""
            item.basis_and_punishment = basis_and_punishment
            item.illegal_facts = illegal_facts
            item.province = self.province
            # LSH appears to start with YYYYMM; "01" pins the day — TODO
            # confirm the serial-number format against the live payload.
            item.year = info["LSH"][:6] + "01"
            yield item


if __name__ == "__main__":
    # Convenience entry point for local debugging; equivalent to running
    # `scrapy crawl hubei_illegal` from the project root.
    from scrapy import cmdline

    cmdline.execute("scrapy crawl hubei_illegal".split())
