#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/13 16:06
# @Author  : 王凯
# @File    : jiangxi_illegal.py
# @Project : spider-man
import re

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider
from utils.tools import urlencode


class JiangxiIllegalSpider(BaseTaxIllegalSpider):
    """Spider for tax-violation disclosures published by the Jiangxi tax bureau.

    Crawl flow:
      1. POST the blank search form to ``result.do`` to learn the total page count.
      2. Request every result page (``result2.do``) and pull one detail id per entry.
      3. Fetch each detail page (``getdetail.do``) and map its ``th``/``td`` table
         rows onto a :class:`NetTaxIllegalProItem`.
    Also kicks off the shared e-tax portal search provided by the base class.
    """

    name: str = "jiangxi_illegal"
    province: str = "江西"
    url: str = "https://jiangxi.chinatax.gov.cn/taxmap/front/result.do"  # search/landing page

    def start_requests(self):
        """Submit an empty search form (all records) and start the e-tax search."""
        url = "https://jiangxi.chinatax.gov.cn/taxmap/front/result.do"
        data = {
            "region": "",
            "nature": "",
            "name": "",
            "fddbrzjhm": "",
            "year": "",
            "taxpayer": "",
            "identify": "",
            "zcdz": "",
            "zjjgdm": "",
            "cwfzrzjmc": "",
        }
        yield self.FormRequest(url, formdata=data, callback=self.parse)
        # Base-class helper: crawl the provincial e-tax portal as well.
        yield from self.etax_search(etax_url="https://etax.jiangxi.chinatax.gov.cn:8443")

    def parse(self, response, **kwargs):
        """Read the total page count off the first page and schedule every page."""
        url = "https://jiangxi.chinatax.gov.cn/taxmap/front/result2.do"
        # The count appears in the page body as e.g. "共 12 页" ("12 pages total").
        page_num = response.xpath(".").re_first(r"共\s*(\d+)\s*页")
        total = int(page_num) if page_num else 0
        for page in range(1, total + 1):
            params = {"region": "", "nature": "", "year": "", "pageno": f"{page}"}
            yield self.Request(url + "?" + urlencode(params), callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Extract the detail id from each result entry and request its page."""
        for entry in response.xpath('//div[@class="list32"]'):
            # Detail id lives in an inline handler: onclick="getDetail(<iid>)".
            iid = entry.xpath("./@onclick").re_first(r"getDetail\((\d+)\)")
            if not iid:
                # Defensive: skip entries whose onclick doesn't carry an id
                # (previously this produced a bogus "iid=None" request).
                continue
            url = f"https://jiangxi.chinatax.gov.cn/taxmap/front/getdetail.do?iid={iid}"
            yield self.Request(url, callback=self.parse_detail_info)

    def parse_detail_info(self, response, **kwargs):
        """Map the detail page's key/value table onto a NetTaxIllegalProItem."""
        info = {}
        for row in response.xpath("//table//tr"):
            key = row.xpath(".//th").xpath("string(.)").get()
            # Some rows have no <td>; `.get()` then returns None — guard
            # before `.strip()` (previously an AttributeError).
            value = (row.xpath(".//td").xpath("string(.)").get() or "").strip()
            if key:
                info[key] = value
        item = NetTaxIllegalProItem()
        item.province = self.province

        # Table header (substring match) -> item attribute.
        mapping = {
            "纳税人名称": "company_name",
            "纳税人识别号": "taxpayer_id",
            "组织机构代码": "org_code",
            "注册地址": "address",
            "法定代表人或者负责人姓名": "legal_representative",
            # "违法期间法人代表或者负责人姓名": "illegal_legal_representative",
            "负有直接责任的财务负责人姓名": "resp_financial",
            # "实际负责人姓名": "resp_person",
            "负有直接责任的中介机构信息": "resp_intermediary",
            "案件性质": "illegal_status",
            "主要违法事实": "illegal_facts",
        }
        # Person-name fields: the site emits placeholder values containing
        # ",,:" when the person is unknown — drop those instead of storing them.
        person_fields = {
            "legal_representative",
            "illegal_legal_representative",
            "resp_financial",
            "resp_person",
        }
        for header, value in info.items():
            for key, attr in mapping.items():
                if key not in header:
                    continue
                if attr in person_fields and ",,:" in value:
                    continue
                setattr(item, attr, value)

        # Split the facts text into "facts" and "legal basis / punishment":
        # everything up to the last clause before 根据/依照/依据 ("pursuant to")
        # is the facts part; the remainder is the basis-and-punishment part.
        # NOTE(review): illegal_facts may be unset if the header was absent;
        # `or ""` keeps the regex from crashing on None — confirm item default.
        illegal_facts_src = item.illegal_facts or ""
        found = re.findall(
            r"(.*(?:主要存在|存在以下问题)(?:[^根]*?[。，;；])*)[^。]*?(?:根据|依照|依据)+", illegal_facts_src
        )
        illegal_facts = found[0] if found else ""
        if illegal_facts:
            item.basis_and_punishment = illegal_facts_src.replace(illegal_facts, "")
            item.illegal_facts = illegal_facts
        else:
            item.basis_and_punishment = ""
            item.illegal_facts = illegal_facts_src
        # First 19xx/20xx year mentioned in the facts is taken as the case year.
        year = re.search(r"((?:20|19)\d{2})[-.年 /]", item.illegal_facts)
        if year:
            item.year = year.group(1)
        yield item


if __name__ == "__main__":
    # Development convenience: run this spider directly via the Scrapy CLI.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "jiangxi_illegal"])
