#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/13 15:27
# @Author  : 王凯
# @File    : anhui_illegal.py
import re

import scrapy

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider


class AnhuiIllegalSpider(BaseTaxIllegalSpider):
    """Spider for major-tax-violation bulletins published by the Anhui tax bureau.

    Crawls the province's bulletin AJAX listing, follows every link to the
    detail pages, and parses each detail table into a ``NetTaxIllegalProItem``.
    """

    name: str = "anhui_illegal"
    province: str = "安徽"
    url: str = "http://anhui.chinatax.gov.cn/module/jslib/bulletin/index.html"  # bulletin index (home) page

    # One detail-table row: right-aligned label cell followed by left-aligned value cell.
    _ROW_RE = r"<td align=\"right\".*?>\s*<.*?>\s*(.*?)<.*?>\s*</td>\s*<td align=\"left\".*?>([\w\W]*?)</td>"

    # Compiled once: HTML-cleanup patterns applied to every extracted cell value.
    _COMMENT_RE = re.compile(r"(<!--.*?-->)")
    _SCRIPT_RE = re.compile(r"(<script.*?>.*?</script.*?>)")
    _TAG_RE = re.compile(r"(<.*?>)")
    # Strips tags, all whitespace, and Chinese curly quotes from the facts text.
    _STRIP_RE = re.compile(r"(<.*?>|\s*|“|”)")
    # Splits "main illegal facts" from the legal-basis/punishment sentence that
    # typically starts with 根据/依照/依据.
    _FACTS_RE = re.compile(r"(.*(?:主要存在|存在以下问题)(?:[^根]*?[。，;；])*)[^。]*?(?:根据|依照|依据)+")

    # Detail-table label (Chinese) -> item field name.
    _FIELD_MAP = {
        "纳税人名称": "company_name",
        "纳税人识别号": "taxpayer_id",
        "组织机构代码": "org_code",
        "注册地址": "address",
        "法定代表人或负责人姓名、性别及身份证号码（或其他证件号码）": "legal_representative",
        "违法期间法人代表或者负责人姓名、性别及身份证号码（或其他证件号码）": "illegal_legal_representative",
        "负有直接责任的财务人员姓名、性别及身份证号码（或其他证件号码）": "resp_financial",
        "实际负责人姓名、性别及身份证号码（或其他证件号码）": "resp_person",
        "负有直接责任的中介机构信息": "resp_intermediary",
        "案件性质": "illegal_status",
    }

    def start_requests(self):
        """Entry point: run the shared e-tax search, then fetch the bulletin list."""
        yield from self.etax_search(etax_url="https://etax.anhui.chinatax.gov.cn:8443")
        list_url = "https://anhui.chinatax.gov.cn/module/jslib/bulletin/ajaxfors.jsp"
        # Form payload mirrors the site's own bulletin AJAX request.
        payload = {"tablename": "jcms_17", "title": ",", "key": "-1,"}
        yield scrapy.FormRequest(list_url, formdata=payload, callback=self.parse)

    def parse(self, response, **kwargs):
        """Follow every anchor on the bulletin list page to its detail page."""
        for href in response.xpath("//a/@href").getall():
            # self.Request is presumably a helper on BaseTaxIllegalSpider
            # (e.g. URL-join + dedup) — confirm against the base class.
            yield self.Request(href, callback=self.parse_detail)

    def _clean_html(self, raw):
        """Return *raw* with HTML comments, scripts and tags removed, stripped."""
        cleaned = self._COMMENT_RE.sub("", raw)
        cleaned = self._SCRIPT_RE.sub("", cleaned)
        return self._TAG_RE.sub("", cleaned).strip()

    def parse_detail(self, response, **kwargs):
        """Parse one bulletin detail page into a ``NetTaxIllegalProItem``."""
        item = NetTaxIllegalProItem()

        # Collect label -> raw-HTML-value pairs from the detail table.
        # Selector.re returns all captured groups; pair them two at a time so a
        # row matching more than once cannot break dict construction (the old
        # dict(list-of-4-groups) form raised ValueError on such rows).
        info = {}
        for row in response.xpath("//table//tr"):
            groups = row.re(self._ROW_RE)
            for label, value in zip(groups[::2], groups[1::2]):
                info[label] = value

        item.province = self.province

        # Map known labels onto item fields, cleaning the HTML value.
        for label, raw in info.items():
            field = self._FIELD_MAP.get(label)  # single lookup (was looked up twice)
            if field:
                setattr(item, field, self._clean_html(raw))

        # "主要违法事实" holds both the facts and the legal basis/punishment;
        # split it on the 根据/依照/依据 sentence.
        facts_src = info.get("主要违法事实", "")
        facts_src = self._COMMENT_RE.sub("", facts_src)
        facts_src = self._SCRIPT_RE.sub("", facts_src)
        facts_src = self._STRIP_RE.sub("", facts_src)
        matched = self._FACTS_RE.findall(facts_src)
        illegal_facts = matched[0] if matched else ""
        if illegal_facts:
            basis_and_punishment = facts_src.replace(illegal_facts, "")
        else:
            # No recognizable split point: keep everything as the facts.
            illegal_facts = facts_src
            basis_and_punishment = ""
        item.basis_and_punishment = basis_and_punishment
        item.illegal_facts = illegal_facts

        # Publication date lives in a <meta name="PubDate" content="YYYY-MM-DD ...">.
        pub_date = response.xpath('//*[@name="PubDate"]/@content').get()
        # Guard against pages missing the meta tag (previously crashed on None).
        # NOTE: despite the name, "year" is stored as YYYYMMDD.
        item.year = pub_date[:10].replace("-", "") if pub_date else ""
        yield item


if __name__ == "__main__":
    # Convenience entry point: launch this spider directly for local debugging,
    # equivalent to running `scrapy crawl anhui_illegal` from the CLI.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "anhui_illegal"])
