#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/13 11:26
# @Author  : 王凯
# @File    : shaanxi_illegal.py
# @Project : spider-man
import re

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider
from utils.tools import urlencode


class LiaoningIllegalSpider(BaseTaxIllegalSpider):
    """Spider for major tax-violation disclosures on the Liaoning tax bureau site.

    Walks the paginated search listing, follows every detail link, and parses
    the key/value table on each detail page into a ``NetTaxIllegalProItem``.
    """

    name: str = "liaoning_illegal"
    province: str = "辽宁"
    url: str = "https://liaoning.chinatax.gov.cn/col/col5883/index.html"  # column landing page

    # Listing-search endpoint and its fixed query parameters; the page number
    # ("currpage") is appended per request. Previously this dict was duplicated
    # verbatim in start_requests() and parse().
    SEARCH_URL: str = "https://liaoning.chinatax.gov.cn/module/search/index.jsp"
    SEARCH_PARAMS: dict = {
        "a": "",
        "b": "",
        "strSelectID": "1754,1755",
        "i_columnid": "5883",
        "field": "a:1:1,b:1:1",
        "initKind": "FieldForm",
        "type": "1,1",
        "currentplace": "",
        "splitflag": "",
        "fullpath": "0",
    }

    def _search_request(self, page_num, **request_kwargs):
        """Build a listing request for the given 1-based page number."""
        query = urlencode({**self.SEARCH_PARAMS, "currpage": str(page_num)})
        return self.Request(self.SEARCH_URL + "?" + query, callback=self.parse, **request_kwargs)

    def start_requests(self):
        # The e-tax search flow is handled by the base-class helper.
        yield from self.etax_search(**{"etax_url": "https://etax.liaoning.chinatax.gov.cn:8443"})
        yield self._search_request(1)

    def parse(self, response, **kwargs):
        """Parse one listing page: emit detail requests, then the next page."""
        yield from self.parse_list(response, **kwargs)
        page_num = kwargs.get("page_num") or 1
        # The total page count is rendered as "共 N 页". Default to "1" when the
        # pattern is absent — int(None) used to raise TypeError here.
        page_total_num = response.xpath(".").re_first(r"共\s*(\d+)\s*页") or "1"
        if page_num < int(page_total_num):
            page_num += 1
            yield self._search_request(page_num, cb_kwargs={"page_num": page_num})

    def parse_list(self, response, **kwargs):
        """Follow every detail link found in the listing table."""
        for href in response.xpath("//tr//a/@href").getall():
            yield response.follow(href, callback=self.parse_detail_info)

    def parse_detail_info(self, response, **kwargs):
        """Parse a detail page's two-column table into a NetTaxIllegalProItem."""
        item = NetTaxIllegalProItem()
        item.province = self.province

        # Strip HTML comments and inline <script> blocks so commented-out
        # table rows / script text are not extracted as data.
        response = response.replace(
            body=re.sub(r"(<!--.*?-->|<script>.*?</script>)", "", response.text, flags=re.S).encode("utf-8")
        )
        # Collect label/value pairs; only rows with exactly two cells are
        # meaningful, and a repeated label keeps its last value (same as the
        # original dict(list-of-pairs) behavior).
        info = {}
        for row in response.xpath("//table//tr"):
            cells = [cell.xpath("string(.)").get("").strip() for cell in row.xpath(".//td")]
            if len(cells) == 2:
                info[cells[0]] = cells[1]

        for k, v in info.items():
            if "纳税人名称" in k:
                item.company_name = v
            if "纳税人识别号" in k:
                item.taxpayer_id = v
            if "组织机构代码" in k:
                item.org_code = v
            if "注册地址" in k:
                # resolve_register_address returns (city, country) for the raw address.
                item.city, item.country = item.resolve_register_address(v)[:2]
                item.address = v
            if "法定代表人或者负责人姓名" in k:
                item.legal_representative = v
            if "负有直接责任的中介机构" in k:
                item.resp_intermediary = v
            if "负有直接责任的财务负责人姓名" in k:
                item.resp_financial = v
            if "实际责任人姓名" in k:
                item.resp_person = v
            if "案件性质" in k:
                item.illegal_status = v
            if "主要违法事实" in k:
                illegal_facts_src = v
                # Split the free text into "facts" (up to the legal-basis
                # connective 根据/依照/依据) and "basis & punishment" (the rest).
                illegal_facts = re.findall(
                    r"(.*(?:主要存在|存在以下问题|发现其在)(?:[^根]*?[。，;；])*)[^。]*?(?:根据|依照|依据)+",
                    illegal_facts_src,
                )
                illegal_facts = illegal_facts[0] if illegal_facts else ""
                if illegal_facts:
                    basis_and_punishment = illegal_facts_src.replace(illegal_facts, "")
                else:
                    # No recognizable split point: keep everything as "facts".
                    illegal_facts = illegal_facts_src
                    basis_and_punishment = ""
                item.basis_and_punishment = basis_and_punishment
                item.illegal_facts = illegal_facts

        # Publication date meta tag, e.g. "2023-09-13 ..." -> year field "20230913".
        # Guard against a missing tag — pub_date[:10] used to raise on None.
        pub_date = response.xpath('//*[@name="PubDate"]/@content').get()
        if pub_date:
            item.year = pub_date[:10].replace("-", "")

        yield item


if __name__ == "__main__":

    from scrapy import cmdline

    # Run this spider directly, equivalent to `scrapy crawl liaoning_illegal`.
    crawl_argv = ["scrapy", "crawl", "liaoning_illegal"]
    cmdline.execute(argv=crawl_argv)
