#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/13 11:26
# @Author  : 王凯
# @File    : heilongjiang_illegal.py
# @Project : spider-man
import re

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider


class HeilongjiangIllegalSpider(BaseTaxIllegalSpider):
    """Spider for major tax-violation cases published by the Heilongjiang
    provincial tax bureau.

    Queries the bulletin list endpoint with blank filters (i.e. all
    records), follows every case-detail link, and parses the label/value
    table on each detail page into a ``NetTaxIllegalProItem``.
    """

    name: str = "heilongjiang_illegal"
    province: str = "黑龙江"
    url: str = "http://heilongjiang.chinatax.gov.cn/module/jslib/bulletin/zdss.html"  # bulletin landing page (first page)

    # Compiled once at class-creation time instead of per detail page.
    # HTML comments are stripped BEFORE tags so a commented-out tag
    # fragment cannot survive the tag-stripping pass.
    _COMMENT_RE = re.compile(r"(<!--.*?-->)")
    _TAG_RE = re.compile(r"(<.*?>)")
    # Removes tags, whitespace and curly quotes from the facts text.
    _FACTS_NOISE_RE = re.compile(r"(<.*?>|\s+|“|”)")
    # Captures the "violation facts" prefix, which ends right before the
    # legal-basis part ("根据/依照/依据 ...") of the combined text.
    _FACTS_RE = re.compile(
        r"(.*(?:主要存在|存在以下问题|发现)(?:[^根]*?[。，;；])*)[^。]*?(?:根据|依照|依据)+"
    )
    # Chinese table labels on the detail page -> item attribute names.
    _FIELD_MAP = {
        "纳税人名称": "company_name",
        "纳税人识别号或社会信用代码": "taxpayer_id",
        "组织机构代码": "org_code",
        "注册地址": "address",
        "法定代表人或者负责人姓名、性别及身份证号码（或者其他证件号码）": "legal_representative",
        "违法期间法人代表或者负责人姓名、性别及身份证号码（或者其他证件号码）": "illegal_legal_representative",
        "负有直接责任的财务人员姓名、性别及身份证号码（或者其他证件号码）": "resp_financial",
        "实际负责人姓名、性别及身份证号码（或者其他证件号码）": "resp_person",
        "负有直接责任的中介机构信息": "resp_intermediary",
        "案件性质": "illegal_status",
        "主要违法事实相关法律依据及税务处理处罚情况": "illegal_facts",
    }

    def start_requests(self):
        """Kick off the e-tax search, then POST the bulletin list query."""
        yield from self.etax_search(etax_url="https://etax.heilongjiang.chinatax.gov.cn:8443")
        url = "http://heilongjiang.chinatax.gov.cn/module/jslib/bulletin/ajaxfors.jsp"
        # All filter fields intentionally blank: fetch every record.
        data = {
            "tablename": "jcms_141",
            "nsrmc": "",
            "nsrsbh": "",
            "zcdz": "",
            "zzjgdm": "",
            "fddbrmc": "",
            "fddbrsfzhm": "",
            "cwfzrmc": "",
            "cwfzrsfzhm": "",
        }
        yield self.FormRequest(url, formdata=data, callback=self.parse)

    def parse(self, response, **kwargs):
        """Follow every case-detail link found in the bulletin list."""
        for href in response.xpath("//tr//a/@href").getall():
            yield self.Request(href, callback=self.parse_detail_info)

    def parse_detail_info(self, response, **kwargs):
        """Parse the label/value table of a case-detail page into an item.

        Yields one ``NetTaxIllegalProItem`` per page.
        """
        item = NetTaxIllegalProItem()
        item.province = self.province

        # Keep only two-cell rows: (label, value) pairs of the detail
        # table; later duplicate labels overwrite earlier ones, as before.
        info = {}
        for row in response.xpath("//table//tr"):
            cells = [td.xpath("string(.)").get().strip() for td in row.xpath(".//td")]
            if len(cells) == 2:
                info[cells[0]] = cells[1]

        for label, attr in self._FIELD_MAP.items():
            if label in info:
                cleaned = self._TAG_RE.sub("", self._COMMENT_RE.sub("", info[label]))
                setattr(item, attr, cleaned.strip())

        # The combined "facts + legal basis + punishment" text is split into
        # the facts prefix and the remainder (basis and punishment).
        raw_facts = info.get("主要违法事实相关法律依据及税务处理处罚情况", "")
        raw_facts = self._FACTS_NOISE_RE.sub("", self._COMMENT_RE.sub("", raw_facts))
        found = self._FACTS_RE.findall(raw_facts)
        illegal_facts = found[0] if found else ""
        if illegal_facts:
            basis_and_punishment = raw_facts.replace(illegal_facts, "")
        else:
            # Could not locate the split point: keep everything as facts.
            illegal_facts = raw_facts
            basis_and_punishment = ""
        item.basis_and_punishment = basis_and_punishment
        item.illegal_facts = illegal_facts

        # Publication date meta tag, e.g. "2023-09-13" -> "20230913".
        # Guard against a missing tag (original crashed on None[:10]).
        pub_date = response.xpath('//*[@name="PubDate"]/@content').get() or ""
        item.year = pub_date[:10].replace("-", "")
        yield item


if __name__ == "__main__":
    # Convenience entry point: launch this spider through the Scrapy CLI.
    from scrapy import cmdline

    cmdline.execute(argv=["scrapy", "crawl", HeilongjiangIllegalSpider.name])
