#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/15 11:05
# @Author  : 王凯
# @File    : sichuan_illegal.py
# @Project : spider-man
import re

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider
from utils.tools import urlencode


class SichuanIllegalSpider(BaseTaxIllegalSpider):
    """Spider for the Sichuan tax authority's major-tax-violation disclosures.

    Crawls the province's column-search endpoint page by page, follows every
    row link found in the result table, and extracts one
    ``NetTaxIllegalProItem`` per detail page.
    """

    name = "sichuan_illegal"
    province: str = "四川"
    url: str = "https://sichuan.chinatax.gov.cn/col/col14253/index.html"  # list landing page (kept for reference)

    # Search endpoint shared by the initial request and every pagination
    # request; previously this URL and the params dict below were duplicated
    # in both start_requests and parse_list.
    SEARCH_URL = "https://sichuan.chinatax.gov.cn/module/search/index.jsp"
    # Fixed query parameters — only "currpage" varies while paginating.
    SEARCH_PARAMS = {
        "field": (
            "vc_name:1:0,field_1262:1:0,field_1263:1:0,field_1264:1:0,"
            "field_1265:1:0,field_1266:1:0,field_1277:12:0,field_1278:12:0,"
            "field_1279:12:0,field_1280:12:0,field_1281:12:0"
        ),
        "i_columnid": "20038",
        "vc_name": "",
        "field_1262": "",
        "field_1263": "",
        "field_1264": "",
        "field_1265": "",
        "field_1266": "",
        "field_1277": "",
        "field_1278": "",
        "field_1279": "",
        "field_1280": "",
        "field_1281": "",
        "currpage": "1",
    }

    @staticmethod
    def _clean(text: str) -> str:
        """Strip HTML comments, HTML tags and all whitespace from *text*."""
        return re.sub(r"(<!--.*?-->|<.*?>|\s+)", "", text, flags=re.S)

    def _search_request(self, currpage: int):
        """Build a list-page request for the given 1-based page number."""
        params = {**self.SEARCH_PARAMS, "currpage": str(currpage)}
        return self.Request(
            self.SEARCH_URL + "?" + urlencode(params),
            callback=self.parse_list,
            cb_kwargs={"currpage": currpage},
        )

    def start_requests(self):
        # E-tax portal search provided by the shared base spider.
        yield from self.etax_search(etax_url="https://etax.sichuan.chinatax.gov.cn:8443")
        yield self._search_request(1)

    def parse_list(self, response, **kwargs):
        """Parse one result page, then request the next page if any remain."""
        currpage = int(kwargs.get("currpage", 1))
        # Extract the detail links of the current page first.
        yield from self.parse(response, **kwargs)
        # The page shows the total page count as "共 N 页".
        pages = response.xpath(".").re_first(r"共\s*(\d+)\s*页")
        if pages and int(pages) > currpage:
            yield self._search_request(currpage + 1)

    def parse(self, response, **kwargs):
        """Follow every row link in the result table to its detail page."""
        for cell in response.xpath("//td"):
            href = cell.xpath(".//a/@href").get()
            if href:
                yield response.follow(href, callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Extract one violation record from a detail page's key/value table."""
        item = NetTaxIllegalProItem()
        item.province = self.province

        # Collect only two-cell (label, value) rows; anything else is layout.
        info = []
        for row in response.xpath("//table//tr"):
            cells = [td.xpath("string(.)").get().strip() for td in row.xpath(".//td")]
            if len(cells) == 2:
                info.append(cells)

        # Labels may repeat partial phrases, so each check is an independent
        # "substring in label" test rather than an elif chain.
        for k, *v in info:
            v = " ".join(v)
            if "失信主体名称" in k:
                item.company_name = self._clean(v)
            if "失信主体识别号" in k:
                item.taxpayer_id = self._clean(v)
            if "组织机构代码" in k:
                item.org_code = self._clean(v)
            if "注册地址" in k:
                item.address = self._clean(v)
            if "法定代表人或者负责人" in k:
                item.legal_representative = re.sub(r"\s+", "", v)
            if "违法期间法人代表或者负责人姓名" in k:
                # Drop everything wrapped between the first and last tag, then
                # strip any remaining markup and normalize inner whitespace.
                v = re.sub(r"(<.*?>.*<.*?>|\s*)", "", v, flags=re.S)
                v = " ".join(re.sub(r"(<!--.*?-->|<.*?>)", "", v, flags=re.S).split())
                item.illegal_legal_representative = v.strip("...")
            if "负有直接责任的财务人员姓名" in k:
                v = " ".join(re.sub(r"(<!--.*?-->|<.*?>)", "", v, flags=re.S).split())
                item.resp_financial = v
            if "实际责任人姓名" in k:
                v = " ".join(re.sub(r"(<!--.*?-->|<.*?>)", "", v, flags=re.S).split())
                item.resp_person = v
            if "负有直接责任的中介机构信息" in k:
                v = " ".join(re.sub(r"(<!--.*?-->|<.*?>)", "", v, flags=re.S).split()).replace("null", "")
                item.resp_intermediary = v
            if "主体违法性质" in k:
                item.illegal_status = v
            if "主要违法事" in k:
                # Split the combined text into "facts" vs "legal basis and
                # punishment": the facts run up to the first 根据/依照/依据 clause.
                v = re.sub(r"(<!--.*?-->|\s*|<.*?>)", "", v, flags=re.S)
                illegal_facts_src = re.sub(r"(<.*?>|\s*|“|”)", "", re.sub(r"(<!--.*?-->)", "", v))
                illegal_facts = re.findall(
                    r"(.*(?:主要存在|存在以下问题)(?:[^根]*?[。，;；])*)[^。]*?(?:根据|依照|依据)+", illegal_facts_src
                )
                illegal_facts = illegal_facts[0] if illegal_facts else ""
                if illegal_facts:
                    basis_and_punishment = illegal_facts_src.replace(illegal_facts, "")
                else:
                    # No recognizable "basis" marker: keep the whole text as facts.
                    illegal_facts = illegal_facts_src
                    basis_and_punishment = ""
                item.basis_and_punishment = basis_and_punishment
                item.illegal_facts = illegal_facts

        # Publication date meta tag, e.g. "2023-09-15 ..." -> "20230915".
        # Guard against the tag being absent (previously a TypeError crash).
        pub_date = response.xpath('//*[@name="PubDate"]/@content').get()
        item.year = pub_date[:10].replace("-", "") if pub_date else ""
        yield item


if __name__ == "__main__":
    # Allow running this spider directly for local debugging,
    # e.g. `python sichuan_illegal.py`.
    from scrapy.cmdline import execute

    execute(argv=["scrapy", "crawl", "sichuan_illegal"])
