# -*- coding: utf-8 -*-
# @Time   : 2023/9/13 14:56
# @Author : 大龙 🚀
# @File   : gansu_illegal.py

import re

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider
from utils.tools import urlencode, parse_url_params


class GansuiIllegalSpider(BaseTaxIllegalSpider):
    """Spider for major tax-violation cases published by the Gansu tax bureau.

    Crawls the column search API of gansu.chinatax.gov.cn page by page,
    follows each case link and extracts the key/value detail table into a
    ``NetTaxIllegalProItem``.
    """

    name: str = "gansu_illegal"
    province: str = "甘肃省"
    url: str = "http://gansu.chinatax.gov.cn/col/col8350/index.html"  # landing page of the column

    def start_requests(self):
        """Start the shared e-tax flow, then query page 1 of the column search API."""
        yield from self.etax_search(**{"etax_url": "https://etax.gansu.chinatax.gov.cn:8443"})
        url = "http://gansu.chinatax.gov.cn/module/search/index.jsp"
        # Query parameters mirror the form the site's own JS submits; the
        # field_* entries are the (empty) search filters of column 8350.
        params = {
            "field_849": "",
            "field_850": "",
            "field_857": "",
            "field_868": "",
            "field_867": "",
            "field_860": "",
            "field_851": "",
            "field_852": "",
            "field_855": "",
            "field_866": "",
            "field_856": "",
            "field_865": "",
            "strSelectID": "849,850,868,857,867,860,851,852,855,866,865,856",
            "i_columnid": "8350",
            "field": "field_849:1:1,field_850:1:1,field_851:1:1,field_852:1:1,field_857:1:1,field_860:1:1,field_867:1:1,field_868:1:1,field_855:1:1,field_866:1:1,field_865:1:1,field_856:1:1",
            "initKind": "FieldForm",
            "type": "1,1,1,1,1,1,1,1,1,1,1,1",
            "currentplace": "",
            "splitflag": "",
            "fullpath": "0",
            "download": "查询",
            "currpage": "1",
        }
        yield self.Request(url + "?" + urlencode(params), callback=self.parse_list)

    def parse_list(self, response, **kwargs):
        """Parse one result page and schedule the next page, if any."""
        root_url, params = parse_url_params(response.url)
        yield from self.parse(response, **kwargs)  # extract the current page first
        current_page = kwargs.get("current_page", 1)
        # ``re_first`` returns None when the pagination banner ("共 N 页") is
        # missing (e.g. single-page results) -- fall back to one page instead
        # of crashing on ``int(None)``.
        total_match = response.xpath(".").re_first(r"共\s*(\d+)\s*页")
        total_page = int(total_match) if total_match else 1
        if current_page < total_page:
            current_page += 1
            yield self.Request(
                root_url + "?" + urlencode({**params, "currpage": str(current_page)}),
                callback=self.parse_list,
                cb_kwargs=dict(current_page=current_page),
            )

    def parse(self, response, **kwargs):
        """Yield one detail request per case row on a list page."""
        for row in response.xpath("//table//table"):
            url = row.xpath(".//a/@href").get()
            if url:  # some rows carry no link; skip instead of follow(None)
                yield response.follow(url, callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Extract the key/value detail table of one case into an item."""
        info = []
        # Strip HTML comments first: the page hides stale markup inside
        # ``<!-- -->`` blocks that would otherwise pollute the XPath results.
        response = response.replace(body=re.sub(r"(<!--.*?-->)", "", response.text, flags=re.S).encode("utf-8"))
        for row in response.xpath('//table[@class="zdsc_con"]//tr'):
            cells = [cell.xpath("string(.)").get().strip() for cell in row.xpath(".//td")]
            # Only proper label/value pairs are useful; skip headers/spacers.
            if len(cells) == 2:
                info.append(cells)

        item = NetTaxIllegalProItem()
        item.province = self.province

        for k, *v in info:
            v = " ".join(v)
            # Keep only CJK characters in the label so matching is robust
            # against whitespace, colons and numbering in the source cell.
            k = re.sub(r"[^\u4e00-\u9fa5]+", "", k)
            if "纳税人名称" in k:
                item.company_name = v
            if "纳税人识别号" in k:
                item.taxpayer_id = v
            if "组织机构代码" in k:
                item.org_code = v
            if "注册地址" in k:
                # Resolve once instead of twice for the same address.
                resolved = item.resolve_register_address(v)
                item.city = resolved[0]
                item.country = resolved[1]
                item.address = v
            if "法定代表人或者负责人姓名" in k:
                item.legal_representative = v
            if "负有直接责任的中介机构" in k:
                item.resp_intermediary = v
            if "负有直接责任的财务负责人姓名" in k:
                item.resp_financial = v
            if "实际责任人姓名" in k:
                item.resp_person = v
            if "案件性质" in k:
                item.illegal_status = v
            if "主要违法事实" in k:
                # Split the free-text cell into "facts" (up to the first
                # 根据/依照/依据 clause) and "legal basis + punishment".
                illegal_facts_src = v
                illegal_facts = re.findall(
                    r"(.*(?:主要存在|存在以下问题|发现其在)(?:[^根]*?[。，;；])*)[^。]*?(?:根据|依照|依据)+",
                    illegal_facts_src,
                )
                illegal_facts = illegal_facts[0] if illegal_facts else ""
                if illegal_facts:
                    basis_and_punishment = illegal_facts_src.replace(illegal_facts, "")
                else:
                    # No recognizable split point: keep everything as facts.
                    illegal_facts = illegal_facts_src
                    basis_and_punishment = ""
                item.basis_and_punishment = basis_and_punishment
                item.illegal_facts = illegal_facts

        # "2023-09-13 ..." -> "20230913"; guard against a missing meta tag
        # so one malformed page does not lose the whole item.
        pub_date = response.xpath('//*[@name="pubdate"]/@content').get()
        if pub_date:
            item.year = pub_date[:10].replace("-", "")

        yield item


if __name__ == "__main__":
    # Allow running this spider directly for local debugging.
    from scrapy import cmdline

    cmdline.execute(argv="scrapy crawl gansu_illegal".split())
