#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/15 15:19
# @Author  : 王凯
# @File    : xizhang_illegal.py
# @Project : spider-man
import re

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider
from utils.tools import urlencode


class XizhangIllegalSpider(BaseTaxIllegalSpider):
    """Spider for the Tibet (Xizang) tax authority's "major tax violation
    cases" disclosure list.

    Flow: query the site's paginated search module, yield one request per
    result row, then scrape each case's detail page into a
    ``NetTaxIllegalProItem``.
    """

    name = "xizang_illegal"
    province: str = "西藏"
    url: str = "https://xizang.chinatax.gov.cn/col/col2371/index.html"  # landing page (kept for reference)

    # Search endpoint shared by the first page and all follow-up pages.
    SEARCH_URL = "https://xizang.chinatax.gov.cn/module/search/index.jsp"

    @staticmethod
    def _search_params(page):
        """Return the query parameters for search-result page *page*.

        Only ``currpage`` varies between requests; everything else is the
        fixed form payload the site's search module expects.
        """
        return {
            "vc_name": "",
            "field_406": "",
            "field_407": "",
            "field_408": "",
            "strSelectID": "style_315,406,407,408",
            "i_columnid": "style_63",
            "field": "vc_name:1,field_406:1,field_407:1,field_408:1",
            "initKind": "FieldFormMetadata",
            "currentplace": "",
            "splitflag": "",
            "fullpath": "0",
            "download": "查询",
            "currpage": str(page),
        }

    def start_requests(self):
        """Kick off the e-tax search (handled by the base class) and request
        the first page of the web search results."""
        yield from self.etax_search(etax_url="https://etax.xizang.chinatax.gov.cn:8443")
        yield self.Request(
            self.SEARCH_URL + "?" + urlencode(self._search_params(1)),
            method="GET",
            callback=self.parse_list,
        )

    def parse_list(self, response, **kwargs):
        """Parse page 1: yield its rows, then request every remaining page.

        BUG FIX: ``re_first`` returns a *string*; the old code did
        ``int(page_num[0])``, which converted only the first digit of the
        page count (e.g. "15" -> 1) and so dropped most pages.
        """
        total_pages = response.xpath("//form/div/div/span[@style]/text()").re_first(r"(\d+)")
        if not total_pages:
            return
        yield from self.parse_page(response, **kwargs)
        for page in range(2, int(total_pages) + 1):
            yield self.Request(
                self.SEARCH_URL + "?" + urlencode(self._search_params(page)),
                callback=self.parse_page,
            )

    def parse_page(self, response, **kwargs):
        """Yield one detail-page request per result row of a list page."""
        for tr in response.xpath('//tr[@class="form-list"]'):
            cells = tr.xpath("./td")
            item = NetTaxIllegalProItem()
            item["company_name"] = cells[0].root.text
            item["taxpayer_id"] = cells[1].root.text
            item["illegal_status"] = cells[2].root.text
            href = tr.xpath("./td/a/@href").get()
            if not href:
                # Row without a detail link — nothing more to scrape.
                continue
            yield response.follow(
                href,
                method="GET",
                callback=self.parse_detail,
                cb_kwargs={"item": item},
            )

    @staticmethod
    def reg_one(reg, text):
        """Return the first capture of *reg* in *text*, or '' when absent."""
        found = re.findall(reg, text)
        return found[0] if found else ""

    def parse_detail(self, response, **kwargs):
        """Fill the remaining item fields from a case's detail page.

        All field writes use key access (``item["..."]``); the old mix of
        ``item.attr`` assignments raises ``AttributeError`` on scrapy Items.
        """
        item = kwargs["item"]
        item["province"] = self.province
        # Guard against a missing #zoom container (``get()`` -> None).
        html = response.xpath('//*[@id="zoom"]').get() or ""
        plain = response.xpath('//*[@id="zoom"]').xpath("string()").get() or ""

        # The CMS wraps each field value in <!--<$[字段名]>begin--> ... end--> markers.
        reg = r"<!--<\$\[{name}\]>begin-->([\w\W]+)<!--<\$\[{name}\]>end-->"
        item["legal_representative"] = self.reg_one(reg.format(name="法定代表人姓名性别及身份证号码"), html)
        item["resp_financial"] = self.reg_one(reg.format(name="财务负责人身份证号"), html)
        item["org_code"] = self.reg_one(reg.format(name="组织机构代码"), html)
        item["address"] = self.reg_one(reg.format(name="注册地址"), html)
        item["illegal_facts"] = self.reg_one(
            r"主要违法事实和相关法律依据：\s*(.*。)\s*", plain
        )

        # Fallback year: first 19xx/20xx year mentioned in the facts text.
        year = re.findall(r"((?:20|19)\d{2})[-.年 /]", item["illegal_facts"])
        if year:
            item["year"] = year[0]

        # Split the facts text into "violation facts" vs "legal basis & punishment":
        # everything up to the clause before 根据/依照/依据 is the facts part.
        facts_src = item["illegal_facts"]
        facts = re.findall(
            r"(.*(?:主要存在|存在以下问题)(?:[^根]*?[。，;；])*)[^。]*?(?:根据|依照|依据)+", facts_src
        )
        facts = facts[0] if facts else ""
        if facts:
            item["illegal_facts"] = facts
            item["basis_and_punishment"] = facts_src.replace(facts, "")
        else:
            item["basis_and_punishment"] = ""

        # Prefer the page's publication date when present (yields "YYYYMMDD";
        # presumably downstream normalizes this — confirm against the item
        # pipeline). The old code crashed with TypeError when the meta tag
        # was missing; now the regex-derived year above is kept instead.
        pub_date = response.xpath('//*[@name="pubDate"]/@content').get()
        if pub_date:
            item["year"] = pub_date[:10].replace("-", "")
        yield item


if __name__ == "__main__":
    # Allow running the spider directly (local debugging) instead of
    # invoking `scrapy crawl` from the shell.
    from scrapy import cmdline

    cmdline.execute(argv="scrapy crawl xizang_illegal".split())
