#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/14 10:19
# @Author  : 王凯
# @File    : hainan_illegal.py
# @Project : spider-man
import re

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider
from utils.tools import parse_url_params, urlencode


class HainanIllegalSpider(BaseTaxIllegalSpider):
    """Spider for the illegal-tax-case ("weifaCase") disclosures on the Hainan
    provincial tax bureau site.

    Walks the paginated case list, requests each case's detail page, and maps
    the detail table rows into ``NetTaxIllegalProItem`` fields.
    """

    name: str = "hainan_illegal"
    province: str = "海南"
    url: str = "https://hainan.chinatax.gov.cn/bsfw_5_8/"  # bureau landing page

    # Detail-page <th> label -> item attribute name.  Defined once at class
    # creation instead of being rebuilt for every parsed detail page.
    FIELD_MAPPING: dict = {
        "纳税人名称": "company_name",
        "统一社会信用代码（纳税人识别号）": "taxpayer_id",
        "组织机构代码": "org_code",
        "注册地址": "address",
        "法定代表人、负责人或者经法院判决确定的实际责任人的姓名、性别、证件名称及号码": "illegal_legal_representative",
        "经法院裁判确定的负有直接责任的财务人员、团伙成员的姓名、性别、证件名称及号码": "resp_financial",
        "负有直接责任的中介机构信息及其从业人员信息": "resp_intermediary",
        "案件性质": "illegal_status",
        "主要违法事实": "illegal_facts",
        "相关法律依据及税务处理、税务行政处罚等情况": "basis_and_punishment",
    }

    def start_requests(self):
        """Issue the first list-page request plus the shared e-tax search."""
        url = "https://hainan.chinatax.gov.cn/weifaCase/weifa_case_list.htm?pageNo=1"
        # All filter fields are sent empty so the full, unfiltered case list
        # is returned.
        data = {
            "area": "",
            "ajinformation": "",
            "startDate": "",
            "month": "",
            "nsrname": "",
            "nsridentify": "",
            "legal": "",
            "duringLegal": "",
        }
        yield self.FormRequest(url, formdata=data, callback=self.parse_list, cb_kwargs=dict(data=data))
        yield from self.etax_search(etax_url="https://etax.hainan.chinatax.gov.cn:8443")

    def parse_list(self, response, **kwargs):
        """Parse one list page, then follow to the next page until the site
        reports there is no more data ("暂时没有数据" in the response body).
        """
        yield from self.parse_manuscript(response, **kwargs)
        if "暂时没有数据" not in response.text:
            root_url, params = parse_url_params(response.url)
            # Only pageNo travels in the query string; the (empty) filter
            # fields are re-sent in the POST body so pagination keeps working.
            yield self.FormRequest(
                root_url + "?" + urlencode({"pageNo": int(params["pageNo"]) + 1}),
                formdata=kwargs.get("data"),
                callback=self.parse_list,
                cb_kwargs=dict(data=kwargs.get("data")),
            )

    def parse_manuscript(self, response, **kwargs):
        """Extract every case id from a list page and request its detail view."""
        url = "https://hainan.chinatax.gov.cn/weifaCase/weifa_case_list.htm"
        for onclick in response.xpath("//tbody/tr/td/input/@onclick"):
            # The onclick attribute looks like `weifaCaseDetail(<id>)`; posting
            # the id back to the same endpoint selects the detail view.
            case_id = onclick.re_first(r"weifaCaseDetail\((.*)\)")
            if case_id:  # skip rows whose onclick did not match (was {"id": None})
                yield self.FormRequest(url, formdata={"id": case_id}, method="POST", callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Map the detail table of a single case into a NetTaxIllegalProItem."""
        item = NetTaxIllegalProItem()
        info = {}
        for row in response.xpath('//*[@id="detailDiv"]/table/tbody/tr'):
            label = row.xpath(".//th").xpath("string(.)").get()
            value = row.xpath(".//td").xpath("string(.)").get()
            if label:
                info[label] = value

        for label, field in self.FIELD_MAPPING.items():
            value = info.get(label)
            if value is None:
                # Row absent or its <td> missing; the previous code would have
                # crashed calling re.sub() on None here.
                continue
            # Strip HTML comments first, then any remaining tags.
            setattr(item, field, re.sub("(<.*?>)", "", re.sub("(<!--.*?-->)", "", value)).strip())
        item.province = self.province
        # Best-effort year extraction from the free-text "facts" field; guard
        # against the row being absent so attribute access cannot blow up.
        facts = getattr(item, "illegal_facts", "") or ""
        year = re.findall(r"((?:20|19)\d{2})[-.年 /]", facts)
        if year:
            item.year = year[0]
        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly for local debugging.
    from scrapy import cmdline

    cmdline.execute(argv="scrapy crawl hainan_illegal".split())