#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/7/25 17:48
# @Author  : 王凯
# @File    : comapny_search_gsxt.py
# @Project : scrapy_spider
import json
import re
from typing import Any

import scrapy
from scrapy.http import Response

from apps.gsxt.gsxt.spiders.company_search_parser_gsxt import CompanySearchGsxtParser
from apps.gsxt.gsxt.spiders.update_ths_redis_company_info import UpdateThsRedisCompanyInfo
from utils.tools import run_mul


class CompanySearchGsxt(scrapy.Spider):
    """Interactive spider that looks a company up on gsxt.gov.cn (National
    Enterprise Credit Information Publicity System), crawls its detail
    sub-pages (shareholders, alterations, annual reports, ...) and, when the
    spider closes, hands every collected raw response to
    ``CompanySearchGsxtParser`` for structured parsing.
    """

    # NOTE: the "comapny" typo is intentional here — run() and any external
    # schedulers invoke the crawl by this exact name.
    name = "comapny_search_gsxt"
    base_url = "https://www.gsxt.gov.cn"
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Pragma": "no-cache",
        "Upgrade-Insecure-Requests": "1",
        "Origin": base_url,
        "Referer": f"{base_url}/corp-query-search-1.html",
    }

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        # Copy the class-level defaults so start_requests() can switch to the
        # login host without mutating the dict shared by all instances.
        self.headers = dict(self.headers)
        # Raw {url, text, json} records appended by parse_data() and consumed
        # in close().
        self.all_data = []

    def parse_card(self, response, **kwargs):
        """Handle the search-result page: record it and follow the first hit."""
        # parse_data() only records the response and returns None, so call it
        # directly instead of yielding None into Scrapy's output pipeline.
        self.parse_data(response, **kwargs)
        keywords = kwargs.get("keywords")
        cards = response.xpath("//*[contains(@class, 'search_list_item')]")
        if not cards:
            self.logger.error(f"未查询到该企业数据 {keywords}")
            return
        # Only the first search hit is followed.
        href = cards[0].xpath("../a/@href").get()
        if href:
            yield scrapy.Request(self.base_url + href, callback=self.parse)
        else:
            # Guard: .get() returns None when the anchor is missing; the old
            # code would crash on the string concatenation.
            self.logger.error(f"未查询到该企业数据 {keywords}")

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Handle the company detail page: record it, then fan out queries to
        the JSON endpoints embedded as ``var xxxUrl = "..."`` JS variables."""
        self.parse_data(response, **kwargs)
        url_mapping = {k: self.base_url + v for k, v in re.findall(r"var\s+(.*?)\s*=\s*\"(.*?)\"\s*", response.text)}
        for var_name, url in url_mapping.items():
            # Only these endpoints are of interest: shareholders, alterations,
            # annual reports and individual-business alerts.
            if var_name in ("shareholderUrl", "alterInfoUrl", "anCheYearInfo", "gtAlertInfoUrl"):
                yield from self.gen_query(var_name, url, referer=response.request.url)

    def gen_query(self, name, url, referer, num=1):
        """Yield one paginated POST (page size 5) to a JSON list endpoint.

        :param name: logical endpoint name, forwarded to parse_query via cb_kwargs.
        :param url: absolute endpoint URL.
        :param referer: Referer header value (the detail-page URL).
        :param num: 1-based page number ("draw" in the DataTables-style protocol).
        """
        offset = (num - 1) * 5
        data = {"draw": str(num), "start": str(offset), "length": "5"}
        yield scrapy.FormRequest(
            url,
            method="POST",
            formdata=data,
            callback=self.parse_query,
            headers={**self.headers, "Referer": referer},
            cb_kwargs={"name": name, "referer": referer},
        )

    def parse_query(self, response, **kwargs):
        """Handle one JSON endpoint page: record it, follow per-row detail
        pages, and request the next page while draw < totalPage."""
        self.parse_data(response, **kwargs)
        name = kwargs.get("name")
        referer = kwargs.get("referer")
        data = response.json()
        if name == "anCheYearInfo":
            # Annual-report list: presumably a plain JSON array — each entry
            # links to a sponsor (shareholder contribution) page.
            for entry in data:
                anCheId = entry.get("anCheId")
                url = f"{self.base_url}/corp-query-entprise-info-sponsor-{anCheId}.html"  # shareholder contributions inside the annual report
                yield from self.gen_query("anCheYear", url, referer=referer, num=1)
        else:
            totalPage = data.get("totalPage")
            results = data.get("data")
            draw = data.get("draw")
            if name == "shareholderUrl":
                for row in results or []:
                    invId = row.get("invId")
                    # Only rows flagged detailCheck == "true" expose a detail page.
                    if row.get("detailCheck") == "true":
                        url = f"{self.base_url}/corp-query-entprise-info-shareholderDetail-{invId}.html"
                        yield scrapy.Request(url, callback=self.parse_data)
            # Guard: a missing draw/totalPage previously raised TypeError on
            # the < comparison with None.
            if draw is not None and totalPage is not None and draw < totalPage:
                yield from self.gen_query(name, response.request.url, referer=referer, num=draw + 1)

    # ##################################################################################      parsing       ################################################################################################
    def parse_data(self, response, **kwargs):
        """Record a raw response into self.all_data as {url, text, json}.

        Used both as a plain helper and as a Scrapy callback; it always
        returns None, so as a callback it schedules nothing further.
        """
        self.logger.info(f"{response.text}")
        item = {
            "url": response.url,
            "text": response.text,
            "json": None,
        }
        try:
            # Prefer parsed JSON; drop the raw text when parsing succeeds.
            item.update({"json": response.json(), "text": None})
        except Exception:
            # Not a JSON response (e.g. an HTML detail page) — keep the text.
            pass

        self.all_data.append(item)

    @staticmethod
    def close(spider: scrapy.Spider, reason: str):
        """On spider close: parse all accumulated raw data and, after an
        interactive confirmation, push the result to redis / THS."""
        all_data = getattr(spider, "all_data", [])
        spider.logger.info(f"原始数据：{all_data}")
        result = CompanySearchGsxtParser(
            all_data=all_data,
            logger_cls=spider.logger,
            base_url=getattr(spider, "base_url", "https://www.gsxt.gov.cn"),
        ).parse_home()
        spider.logger.info(json.dumps(result, ensure_ascii=False, indent=4))
        if (result.get("base_info") or {}).get("taxpayer_id"):
            # Interactive gate — this spider is a manual lookup tool.
            is_update = input("是否更新数据到redis及同花顺?(y/n): ")
            if is_update.lower() == "y":
                UpdateThsRedisCompanyInfo(result=result).run()
        # Zero-arg super() inside a staticmethod resolves against the first
        # argument (`spider`), forwarding to scrapy.Spider.close.
        return super().close(spider, reason)

    def start_requests(self):
        """Interactively read the search keyword, optionally switch to the
        authenticated host, and issue the initial search POST."""
        keywords = input("请输入查询的税号或企业名称:")
        is_login = input("是否登录查询(y/n):")
        if is_login.lower() == "y":
            self.base_url = "https://shiming.gsxt.gov.cn"
            # self.headers is an instance copy (see __init__), so this update
            # does not leak into the class-level defaults.
            self.headers.update({
                "Origin": self.base_url,
                "Referer": f"{self.base_url}/corp-query-search-1.html",
            })
            self.logger.info(f"登录地址: {self.base_url}")
        self.logger.info(f"查询企业: {keywords}")
        data = {
            # Captcha/geetest fields are sent empty; presumably a downstream
            # middleware fills them when meta["geetest"] is set — TODO confirm.
            "tab": "ent_tab", "province": "", "geetest_challenge": "", "geetest_validate": "", "geetest_seccode": "", "lot_number": "", "captcha_output": "", "pass_token": "", "gen_time": "",
            "captchaId": "b608ae7850d2e730b89b02a384d6b9cc", "token": "", "searchword": keywords
        }
        url = f"{self.base_url}/corp-query-search-1.html"
        yield scrapy.FormRequest(url, callback=self.parse_card, formdata=data, meta={"geetest": True}, headers=self.headers, cb_kwargs={"keywords": keywords})


def run():
    """Launch this spider through the Scrapy command-line runner."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "comapny_search_gsxt"])


# Allow running this spider module directly as a script.
if __name__ == "__main__":
    run()
