#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/13 15:45
# @Author  : 王凯
# @File    : fujian_illegal.py
# @Project : spider-man
import json
import re

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider
from utils.tools import parse_url_params, urlencode


class FujianIllegalSpider(BaseTaxIllegalSpider):
    """Spider for major tax-violation ("重大税收违法") cases published by the
    Fujian provincial tax authority.

    Flow: query the WAS full-text search API for the paginated case list,
    follow every case URL, and parse each detail page's table into a
    ``NetTaxIllegalProItem``.
    """

    name: str = "fujian_illegal"
    province: str = "福建"
    url: str = "http://fujian.chinatax.gov.cn/bsfw/zdss/"  # list landing page

    # The WAS search API serves at most this many records per page
    # (mirrors the "prepage" request parameter below).
    PAGE_SIZE: int = 8

    def start_requests(self):
        """Yield the e-tax search requests plus page 1 of the WAS list API."""
        yield from self.etax_search(**{"etax_url": "https://etax.fujian.chinatax.gov.cn:8443"})
        url = "http://fujian.chinatax.gov.cn/was5/web/search"
        params = {
            "channelid": "291316",
            "templet": "zdaj.jsp",
            "sortfield": "-datefor",
            "r": "0.7451214144218865",
            "prepage": "8",
            "page": "1",
        }
        yield self.Request(url + "?" + urlencode(params), callback=self.parse_list)

    def _load_json(self, response):
        """Decode the API body (BOM-tolerant) and parse it as JSON.

        The endpoint embeds literal whitespace/newlines inside the JSON
        template output, so all whitespace is stripped before parsing.
        """
        return json.loads(re.sub(r"\s+", "", response.body.decode("utf-8-sig")))

    def parse_list(self, response, **kwargs):
        """Parse result page 1: emit its detail requests, then schedule the
        remaining pages based on the reported total record count."""
        root_url, params = parse_url_params(response.url)
        yield from self.parse_detail(response, **kwargs)
        total_record = self._load_json(response).get("count", 1)
        if total_record and int(total_record) >= 1:
            # ceil(total / PAGE_SIZE) pages exist in total; page 1 was
            # already consumed above, so request pages 2..total_pages.
            total_pages = -(-int(total_record) // self.PAGE_SIZE)
            for page in range(2, total_pages + 1):
                params = {**params, "page": str(page)}
                yield self.Request(root_url + "?" + urlencode(params), callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Parse one list-API page and request every case detail URL."""
        data = self._load_json(response)
        for doc in data.get("docs", []):
            # Entries without a channelid are navigation/junk records.
            if not doc.get("channelid"):
                continue
            url = doc.get("url")
            if url:
                yield self.Request(url, callback=self.parse_detail_info)

    def parse_detail_info(self, response, **kwargs):
        """Parse a case detail page's key/value table into an item.

        The detail table rows are (label, value) pairs; labels are matched
        by substring against ``mapping`` to fill item fields.
        """
        item = NetTaxIllegalProItem()
        # Collect only well-formed two-cell rows: dict() would raise on
        # header/spacer rows with a different cell count.
        rows = []
        for tr in response.xpath('//table[@cellspacing="1"]//tr'):
            cells = [td.xpath("string(.)").get().strip() for td in tr.xpath(".//td")]
            if any(cells) and len(cells) == 2:
                rows.append(cells)
        info = dict(rows)

        mapping = {
            "纳税人名称": "company_name",
            "纳税人识别号": "taxpayer_id",
            "组织机构代码": "org_code",
            "注册地址": "address",
            "法定代表人或者负责人姓名": "legal_representative",
            "违法期间法人代表或者负责人姓名、性别及身份证号码（或其他证件号码）": "illegal_legal_representative",
            "负有直接责任的财务人员姓名": "resp_financial",
            "实际负责人姓名": "resp_person",
            "负有直接责任的中介机构信息": "resp_intermediary",
            "案件性质": "illegal_status",
            "主要违法事实": "illegal_facts",
        }

        # Person-name fields are only kept when they actually contain a
        # non-empty "姓名：…；" fragment; everything else is stored as-is
        # after stripping embedded HTML and whitespace.
        name_fields = {
            "legal_representative",
            "illegal_legal_representative",
            "resp_financial",
            "resp_person",
        }
        for label, value in info.items():
            for key, field in mapping.items():
                if key not in label:
                    continue
                value = re.sub(r"(<!--.*?-->)", "", value)
                value = re.sub(r"(<.*?>)", "", value).strip()
                value = re.sub(r"\s+", "", value).strip()
                if field in name_fields:
                    if any(re.findall(r"姓名：(.*?)；", value)):
                        setattr(item, field, value)
                else:
                    setattr(item, field, value)

        # Split the raw "主要违法事实" text into the facts themselves and the
        # legal basis / punishment that follows "根据/依照/依据".
        illegal_facts_src = getattr(item, "illegal_facts", "") or ""
        illegal_facts_src = re.sub(r"(<!--.*?-->)", "", illegal_facts_src)
        illegal_facts_src = re.sub(r"(<script.*?>.*?</script.*?>)", "", illegal_facts_src)
        illegal_facts_src = re.sub(r"(<.*?>|\s+|“|”)", "", illegal_facts_src)
        illegal_facts = re.findall(
            r"(.*(?:主要存在|存在以下问题)(?:[^根]*?[。，;；])*)[^。]*?(?:根据|依照|依据)+", illegal_facts_src
        )
        illegal_facts = illegal_facts[0] if illegal_facts else ""
        if illegal_facts:
            basis_and_punishment = illegal_facts_src.replace(illegal_facts, "")
        else:
            # No recognizable split point: keep everything as the facts.
            illegal_facts = illegal_facts_src
            basis_and_punishment = ""
        item.basis_and_punishment = basis_and_punishment
        item.illegal_facts = illegal_facts
        item.province = self.province
        pub_date = response.xpath('//*[@name="PubDate"]/@content').get()
        # Guard: some pages lack the PubDate meta tag; pub_date[:10] on
        # None would raise. "year" actually stores the date as YYYYMMDD.
        item.year = pub_date[:10].replace("-", "") if pub_date else ""
        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly for local debugging.
    from scrapy import cmdline

    cmdline.execute(argv="scrapy crawl fujian_illegal".split())
