#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/13 11:26
# @Author  : 王凯
# @File    : shanghai_illegal.py
# @Project : spider-man
import base64
import datetime
import re
import time

import requests

from apps.tax_illegal.tax_illegal.items import NetTaxIllegalProItem
from apps.tax_illegal.tax_illegal.spiders import BaseTaxIllegalSpider
from utils.tools import urlencode


class ShanghaiIllegalSpider(BaseTaxIllegalSpider):
    """Spider for major tax-violation case records published by the Shanghai
    tax authority.

    Flow: fetch the landing page to obtain a session cookie, fetch the
    captcha image and solve it via an external OCR service, then page through
    the JSON list endpoint, emitting one ``NetTaxIllegalProItem`` per case.
    """

    name = "shanghai_illegal"
    province: str = "上海"
    url: str = "https://shanghai.chinatax.gov.cn/newxbwz/tycx/TYCXzdsswfajgblCtrl-init.pfv#"  # landing page
    cookies = None  # session cookie captured from the landing-page response
    code = None  # solved captcha text; once set, later pages skip the captcha step

    def start_requests(self):
        """Start the e-tax search flow, then fetch the landing page to
        establish the session cookie required by the list endpoint."""
        yield from self.etax_search(**{"etax_url": "https://etax.shanghai.chinatax.gov.cn:8443"})
        params = {
            "nsrmc": "",
            "nsrsbh": "",
            "zcdz": "",
            "fddbrxm": "",
            "qjswjgdm": "",
            "yzm": "",
            "curPage": "1",
        }
        yield self.Request(self.url, callback=self.save_cookie, cb_kwargs=dict(params=params))

    def save_cookie(self, response, **kwargs):
        """Capture the session cookie from the ``Set-Cookie`` header (first
        name=value pair only), then continue to the captcha step."""
        set_cookie = response.headers.to_unicode_dict().get("Set-Cookie")
        cookie = {}
        if set_cookie:
            # Split on the FIRST '=' only: cookie values may themselves
            # contain '=' (e.g. base64 padding), which would make
            # dict([value.split("=")]) raise ValueError.
            cookie_name, _, cookie_value = set_cookie.split(";")[0].partition("=")
            cookie = {cookie_name: cookie_value}
        if not self.cookies:
            self.cookies = cookie
        yield from self.generate_sh_image(response, **kwargs)

    def generate_sh_image(self, response, **kwargs):
        """Request a fresh captcha image, or query the list endpoint directly
        when a captcha has already been solved for this session.

        ``response`` is unused (callers may pass ``None``); it is kept for
        callback-signature symmetry.
        """
        if not self.code:
            # Millisecond timestamp acts as a cache-buster for the image.
            url = f"https://shanghai.chinatax.gov.cn/newxbwz/servlet/GetshowimgSmall?{time.time() * 1000}"
            yield self.Request(url, callback=self.predict_sh_image, cb_kwargs=kwargs, cookies=self.cookies)
        else:
            url = "https://shanghai.chinatax.gov.cn/newxbwz/tycx/TYCXzdsswfajgblCtrl-getxxsByYzm.pfv"
            params = kwargs.get("params", {})
            yield self.Request(
                url + "?" + urlencode(params), callback=self.parse_list, cb_kwargs=kwargs, cookies=self.cookies
            )

    def predict_sh_image(self, response, **kwargs):
        """Send the captcha image to the external OCR service, remember the
        recognized code, and query the first result page with it."""
        # NOTE(review): the OCR endpoint and secret_key are hard-coded in
        # source — consider moving them to spider settings / environment.
        image_res = requests.post(
            "http://129.204.13.211:19952/captcha/v1",
            json={
                "image": base64.b64encode(response.body).decode(),
                "secret_key": "ydEc1x9mjg1iqQDLDNELM54zcOOmN1HI",
                "model_name": "common-captcha-CNNX-GRU-H64-CTC-C1_8800",
            },
            timeout=30,  # avoid hanging the whole crawl if the OCR service stalls
        ).json()
        self.logger.info(f"验证码识别结果：{image_res}")
        yzm = image_res["message"]
        url = "https://shanghai.chinatax.gov.cn/newxbwz/tycx/TYCXzdsswfajgblCtrl-getxxsByYzm.pfv"
        params = kwargs.get("params", {})
        params["yzm"] = yzm
        self.code = yzm
        yield self.Request(
            url + "?" + urlencode(params), callback=self.parse_list, cb_kwargs=kwargs, cookies=self.cookies
        )

    def parse_list(self, response, **kwargs):
        """Emit items for the current page, then schedule the next page until
        ``pageCount`` is reached."""
        self.logger.info(response.json())
        params = kwargs.get("params", {})
        yield from self.parse_detail(response, **kwargs)
        total_page_num = int(response.json().get("pageCount", 1))
        cur_page = int(params.get("curPage", 1))
        if cur_page < total_page_num:
            yield from self.generate_sh_image(None, params={**params, "curPage": cur_page + 1})

    def parse_detail(self, response, **kwargs):
        """Map each record in ``pageData`` onto a ``NetTaxIllegalProItem``.

        The violation text (``wfss``) is split into the facts part and the
        legal-basis/punishment part at the first "根据/依照/依据" clause; when
        the split pattern does not match, the whole text becomes the facts.
        """
        data_list = response.json().get("pageData", [])
        # source JSON key -> item attribute
        mapping_dict = {
            "nsrmc": "company_name",
            "zcdz": "address",
            "nsrsbm": "taxpayer_id",
            "zzjgdm": "org_code",
            "ajMc": "illegal_status",
        }
        for data in data_list:
            item = NetTaxIllegalProItem()
            item.province = self.province
            for src_key, attr in mapping_dict.items():
                setattr(item, attr, data.get(src_key))
            if data.get("frdbmc"):
                # name / gender / ID number, space separated
                item.legal_representative = "{} {} {}".format(
                    data.get("frdbmc", ""), data.get("frdbxb", ""), data.get("frdbsfz", "")
                )
            illegal_facts_src = data.get("wfss", "")
            illegal_facts = re.findall(
                r"(.*(?:主要存在|存在以下问题)(?:[^根]*?[。，;；])*)[^。]*?(?:根据|依照|依据)+", illegal_facts_src
            )
            illegal_facts = illegal_facts[0] if illegal_facts else ""
            if illegal_facts:
                basis_and_punishment = illegal_facts_src.replace(illegal_facts, "")
            else:
                illegal_facts = illegal_facts_src
                basis_and_punishment = ""
            item.basis_and_punishment = basis_and_punishment.strip()
            item.illegal_facts = illegal_facts.strip()
            wjtime = data.get("wjtime")
            if wjtime:  # guard: int(None) would raise TypeError when the field is absent
                # wjtime is a millisecond epoch timestamp; store as YYYYMMDD.
                item.year = datetime.datetime.fromtimestamp(int(wjtime) / 1000).strftime("%Y%m%d")
            yield item


if __name__ == "__main__":
    # Allow running this spider directly: equivalent to `scrapy crawl shanghai_illegal`.
    from scrapy import cmdline

    cmdline.execute(argv="scrapy crawl shanghai_illegal".split())
