#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/13 14:20
# @Author  : 王凯
# @File    : jiangsu_illegal.py
# @Project : spider-man
import re

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider


class JiangSuIllegalSpider(BaseTaxIllegalSpider):
    """Spider for tax-illegal case announcements published by Jiangsu province.

    Pages through the province's JSON list endpoint (POST, 11 rows per page)
    and follows each row's detail page, scraping the two-column label/value
    table into a ``NetTaxIllegalProItem``.
    """

    name: str = "jiangsu_illegal"
    province: str = "江苏"
    url: str = "https://jiangsu.chinatax.gov.cn/module/jslib/bulletin3/lpindex.html"  # landing page

    # Paginated JSON list endpoint (POST form body built by _form_data).
    LIST_URL: str = "https://jiangsu.chinatax.gov.cn/module/jslib/bulletin3/lpajaxfors.jsp"

    @staticmethod
    def _form_data(page_no: int) -> dict:
        """Build the POST form body for one page of the list endpoint.

        All filter fields are sent empty so the full announcement list is
        returned; only the page number varies between requests.
        """
        return {
            "pageSize": "11",
            "pageNo": str(page_no),
            "name": "",
            "nsrsbh": "",
            "zcdz": "",
            "swjg": "",
            "lparea": "",
            "year": "",
            "month": "",
            "fdname": "",
            "fdcard": "",
        }

    def start_requests(self):
        """Kick off pagination at page 1 and the e-tax-bureau search."""
        yield self.FormRequest(
            self.LIST_URL,
            formdata=self._form_data(1),
            callback=self.parse,
            method="POST",
            cb_kwargs={"pageNo": 1},
        )
        # E-tax search flow is implemented by the base class.
        yield from self.etax_search(etax_url="https://etax.jiangsu.chinatax.gov.cn:8443")

    def parse(self, response, **kwargs):
        """Emit detail requests for this page, then request the next page.

        Pagination stops when a page returns an empty ``data`` array.
        """
        yield from self.parse_list(response, **kwargs)
        if response.json()["data"]:
            # Default to 1 so a missing cb_kwarg cannot raise TypeError.
            next_page = kwargs.get("pageNo", 1) + 1
            yield self.FormRequest(
                self.LIST_URL,
                formdata=self._form_data(next_page),
                callback=self.parse,
                verify=False,
                method="POST",
                cb_kwargs={"pageNo": next_page},
            )

    def parse_list(self, response, **kwargs):
        """Yield one detail-page request per row of the JSON list response."""
        for row in response.json()["data"]:
            detail_url = row.get("url")
            if detail_url:  # skip malformed rows without a detail link
                yield self.Request(detail_url, callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Parse one case detail page into a ``NetTaxIllegalProItem``.

        The page body is a table whose first column is a Chinese field label
        and whose second column is the value; ``zh_mapping`` maps item fields
        to substrings expected in the label.  Fields with an empty substring
        list have no known label on this site and are left unset.
        """
        zh_mapping = {
            "company_name": ["纳税人名称"],
            "address": ["注册地址"],
            "taxpayer_id": ["或社会统一信用代码"],
            "org_code": ["或社会统一信用代码"],
            "legal_representative": [],
            "illegal_legal_representative": ["违法行为发生时的法定代表人姓名"],
            "resp_financial": [],
            "resp_financial_head": ["负有直接责任的财务负责人信息"],
            "resp_person": ["经法院裁判确定实际责任人姓名"],
            "resp_intermediary": [],
            "illegal_status": ["案件性质"],
            "illegal_facts": ["主要违法事实"],
            "basis_and_punishment": ["相关法律依据及税务处理处罚情况"],
        }

        # Collect label -> value pairs from the detail table.
        zh_info = {}
        for tr in response.xpath("//table//tr"):
            label = tr.xpath("string(./td[1])").get("").strip()
            value = tr.xpath("string(./td[2])").get("").strip()
            zh_info[label] = value

        ret = {"province": self.province}
        for key, needles in zh_mapping.items():
            if not needles:
                continue  # no label mapping known for this field
            for label, value in zh_info.items():
                if any(needle in label for needle in needles):
                    if key == "basis_and_punishment":
                        # Keep only the statutory-basis tail ("依照…") when
                        # present; scoped to the matched label so unrelated
                        # fields can no longer overwrite it.
                        match = re.search(r"依照.*", value)
                        ret[key] = match.group(0) if match else value
                    else:
                        ret[key] = value

        item = NetTaxIllegalProItem(**ret)
        pub_date = response.xpath('//*[@name="PubDate"]/@content').get()
        if pub_date:
            # "YYYY-MM-DD hh:mm:ss" -> "YYYYMMDD"
            item.year = pub_date[:10].replace("-", "")

        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly via the Scrapy CLI.
    from scrapy import cmdline

    cmdline.execute(argv="scrapy crawl jiangsu_illegal".split())
