#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/2/27 19:30
# @Author  : 王凯
# @File    : black_list.py
# @Project : scrapy_spider

import datetime
import hashlib
import json
import sys
import time
from pathlib import Path
from typing import Any

sys.path.append(Path(__file__).parent.parent.parent.parent.parent.as_posix())

import scrapy
from scrapy.http import Response

from apps.black_list.black_list.items import (
    NetBusinessAnomalyItem,
    NetAdministrativePenaltyItem,
    NetAdministrativeLicenseItem,
    NetSeriousDishonestyItem, NetSeriousDishonestyNameListItem, NetSeriousDishonestyCfcgNameListItem, NetSeriousDishonestySafetyNameListItem,
)
from components.component.scrapy_redis_task.spiders import RedisTaskSpider
from components.config import WFQ_SOURCE_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.tonghuashun_tools import PostgresqlDB
from utils.tools import urlencode, urldecode, run_mul


class RosterSeriousDishonestySpider(RedisTaskSpider):
    """Base spider: loads company tasks from the roster MySQL table into a
    redis zset and records per-task crawl start/end rows in
    ``net_black_task_log``.

    Subclasses implement the actual page parsing; this class only schedules
    work and writes task bookkeeping.
    """

    # Whether start/end callbacks should write log rows to MySQL.
    save_mysql_log = True
    # md5 key identifying the current task's log row (set in start_callback).
    uk_id = None
    # Per-task counters keyed by data-type name.
    # NOTE(review): mutable class attribute — shared by all instances until an
    # instance assignment shadows it (BlackListSearchSpider.parse resets it).
    count_dict = {}
    custom_settings = {
        "LOG_LEVEL": "INFO"
    }

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        # Lazily-created DB handles; see the properties below.
        self._to_db = None
        self._wfq_source_db = None

    @property
    def to_db(self):
        """Target MySQL connection (default config), created on first use."""
        if not self._to_db:
            self._to_db = MysqlDB()
        return self._to_db

    @property
    def wfq_source_db(self):
        """Source MySQL connection (WFQ config), created on first use."""
        if not self._wfq_source_db:
            self._wfq_source_db = MysqlDB(
                ip=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_IP"],
                port=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_PORT"],
                db=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_DB"],
                user_name=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_NAME"],
                user_pass=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_PASS"],
            )
        return self._wfq_source_db

    def add_task(self):
        """Load the next id-batch of companies from MySQL into the redis zset.

        The last scheduled id is persisted under ``<redis_key>:id`` so the
        loader can resume; when the table is exhausted and the queue is
        drained the counter is reset to 0 and loading starts over.
        """
        record_task_table = self.redis_key + ':id'
        last_task_id = self.server.get(record_task_table)
        batch = 1000
        last_task_id = int(last_task_id) if last_task_id else 0
        # Inclusive upper bounds: the original used `id < N + batch` together
        # with `id > N` on the next round, which permanently skipped the row
        # whose id equals each batch boundary.
        if last_task_id:
            sql = (
                f"SELECT company_name as query_key, used_name, taxpayer_id from "
                f"roster_black_list where data_type = '经营异常' and id > {last_task_id} and id <= {last_task_id + batch}"
            )
        else:
            sql = (
                f"SELECT company_name as query_key, used_name, taxpayer_id from "
                f"roster_black_list where data_type = '经营异常' and id <= {batch}"
            )
        datas = self.wfq_source_db.find(sql, to_json=True)
        if datas:
            self.logger.info(
                "数据加载完成,{}, id从{}到{} 数量 {}".format(
                    record_task_table, last_task_id, last_task_id + batch, len(datas)
                )
            )
            for data in datas:
                new_task = {
                    "query_key": data.get("query_key"),
                    "taxpayer_id": data.get("taxpayer_id"),
                }
                self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})

            self.server.set(record_task_table, last_task_id + batch)
        else:
            self.logger.info(f"数据加载完成,没有数据,记录最后一条任务id {self.redis_key} {last_task_id}")
            self.server.set(record_task_table, last_task_id)
            # Tasks still queued: wait for them instead of restarting.
            if self.count_size(self.redis_key):
                return
            self.logger.info("数据加载完成,没有数据,准备下一轮")
            self.server.set(record_task_table, "0")
            self.add_task()

    def make_request_from_data(self, new_task: dict):
        """Build the creditchina detail-page request for one redis task dict.

        Searches by the unified social credit code (dashes stripped) when
        present, otherwise falls back to the company name.
        """
        url = "https://public.creditchina.gov.cn/private-api/getTyshxydmDetailsContent"
        # taxpayer_id may be NULL/empty in the source table — guard .replace.
        taxpayer_id = (new_task["taxpayer_id"] or "").replace("-", "")
        query_key = new_task["query_key"]
        keyword = taxpayer_id or query_key
        params = {
            "keyword": keyword,
            "scenes": "defaultscenario",
            "entityType": "1",
            "searchState": "1",
            "uuid": "",
            "tyshxydm": "",
        }
        return scrapy.Request(
            url + "?" + urlencode(params), callback=self.parse, cb_kwargs=dict(taxpayer_id=keyword)
        )

    def start_callback(self, task_obj, *args, **kwargs):
        """Record the crawl start for this task in net_black_task_log.

        Best effort: any DB error is logged and swallowed so the crawl
        itself is never blocked by bookkeeping.
        """
        if self.save_mysql_log and task_obj:
            try:
                # Read from the task_obj argument (the original read
                # self.task_obj and ignored the parameter) — consistent with
                # end_callback below.
                taxpayer_id = task_obj.get("taxpayer_id")
                self.uk_id = hashlib.md5(f"{taxpayer_id}{time.time()}".encode("utf-8")).hexdigest()
                self.to_db.add_smart(
                    "net_black_task_log",
                    {
                        "query_key": taxpayer_id,
                        "uk_id": self.uk_id,
                        "taxpayer_id": taxpayer_id,
                        "crawler_start": datetime.datetime.now(),
                    },
                    update_columns=["crawler_start"],
                )
            except Exception as e:
                self.logger.error(f"spider start callback {e}")

    def end_callback(self, task_obj, *args, **kwargs):
        """Record crawl end plus per-type counts for this task (best effort)."""
        if task_obj and self.save_mysql_log:
            try:
                taxpayer_id = task_obj.get("taxpayer_id")
                self.to_db.add_smart(
                    "net_black_task_log",
                    {
                        "query_key": taxpayer_id,
                        "uk_id": self.uk_id,
                        "crawler_end": datetime.datetime.now(),
                        "count_dict": self.count_dict,
                    },
                    update_columns=["crawler_end", "count_dict"],
                )
            except Exception as e:
                self.logger.error(f"spider end callback {e}")


class THSQueueSpider(RosterSeriousDishonestySpider):
    """Variant that loads tasks from the PostgreSQL ``enterprise_basic_info``
    table instead of the roster MySQL table.
    """

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        # Lazily-created PostgreSQL handle; see the property below.
        self._pg_db = None

    @property
    def pg_db(self):
        """PostgreSQL connection, created on first use."""
        if not self._pg_db:
            self._pg_db = PostgresqlDB()
        return self._pg_db

    def add_task(self):
        """Load the next seq-batch of enterprises into the redis zset.

        The highest scheduled seq is persisted under ``<redis_key>:ths:id``;
        when the table is exhausted and the queue is drained the counter is
        reset to 0 and loading starts over.
        """
        record_task_table = self.redis_key + ':ths:id'
        last_task_id = self.server.get(record_task_table)
        batch = 1000
        last_task_id = int(last_task_id) if last_task_id else 0
        # One query covers both the first and subsequent batches.  The
        # original second branch was missing its f-string prefix, so the
        # literal text "{batch}" reached the database; it also filtered
        # `seq < batch` (empty result if seq values don't start near 1), and
        # neither branch had ORDER BY, so LIMIT plus the max(seq) bookkeeping
        # below could skip rows between batches.
        sql = (
            "SELECT seq, corp_name as query_key, used_name, unified_social_credit_code as taxpayer_id from"
            f" enterprise_basic_info where seq > {last_task_id} "
            f" and unified_social_credit_code is not null and isvalid = 1 order by seq limit {batch}"
        )
        datas = self.pg_db.find(sql, to_json=True)
        if datas:
            last_task_id_next = str(max(int(i['seq']) for i in datas))
            self.logger.info(
                "数据加载完成,{}, id从{}到{} 数量 {}".format(
                    record_task_table, last_task_id, last_task_id_next, len(datas)
                )
            )
            for data in datas:
                new_task = {
                    "query_key": data.get("query_key"),
                    "taxpayer_id": data.get("taxpayer_id"),
                }
                self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})

            self.server.set(record_task_table, last_task_id_next)
        else:
            self.logger.info(f"数据加载完成,没有数据,记录最后一条任务id {self.redis_key} {last_task_id}")
            self.server.set(record_task_table, last_task_id)
            # Tasks still queued: wait for them instead of restarting.
            if self.count_size(self.redis_key):
                return
            self.logger.info("数据加载完成,没有数据,准备下一轮")
            self.server.set(record_task_table, "0")
            self.add_task()


class BlackListSearchSpider(RosterSeriousDishonestySpider):
    """Crawl creditchina.gov.cn for four blacklist data types per company:
    serious-dishonesty name lists, business anomalies, administrative
    penalties and administrative licenses.
    """

    name = "black_list_search"
    Request = scrapy.Request
    # Per-task counters keyed by data-type name; reset at the start of parse().
    count_dict = {}

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Entry callback for the company detail page: fan out one
        typeSourceSearch request per data type.
        """
        self.count_dict = {}
        yield from self.parse_yzsx(response, **kwargs)
        yield from self.parse_jyyc(response, **kwargs)
        yield from self.parse_xzcf(response, **kwargs)
        yield from self.parse_xzxk(response, **kwargs)

    def _type_search_request(self, response, type_name, taxpayer_id):
        """Yield one typeSourceSearch request for ``type_name`` when the
        searched entity exists (``headEntity.status`` is truthy); yield
        nothing otherwise.  Shared by the four public parse_* callbacks,
        which previously each repeated this block verbatim.
        """
        url = "https://public.creditchina.gov.cn/private-api/typeSourceSearch"
        params = {
            "source": "",
            "type": type_name,
            "searchState": "1",
            "entityType": "1",
            "scenes": "defaultscenario",
            "keyword": taxpayer_id,
            "tyshxydm": "",
            "page": "1",
            "pageSize": "10",
        }
        data = response.json()["data"]
        punishment_status = data.get("punishmentStatus")
        status = data["headEntity"].get("status")
        company_name = data["headEntity"].get("jgmc")
        if status:
            yield self.Request(
                url + "?" + urlencode(params),
                callback=self.parse_response,
                cb_kwargs=dict(
                    punishment_status=punishment_status,
                    status=status,
                    type_name=type_name,
                    company_name=company_name,
                    taxpayer_id=taxpayer_id,
                ),
            )

    def parse_yzsx(self, response, **kwargs):
        # Serious-dishonesty subject name lists.
        yield from self._type_search_request(response, "严重失信主体名单", kwargs.get("taxpayer_id"))

    def parse_jyyc(self, response, **kwargs):
        # Abnormal business operations.
        yield from self._type_search_request(response, "经营异常", kwargs.get("taxpayer_id"))

    def parse_xzcf(self, response, **kwargs):
        # Administrative penalties.
        yield from self._type_search_request(response, "行政处罚", kwargs.get("taxpayer_id"))

    def parse_xzxk(self, response, **kwargs):
        # Administrative licenses.
        yield from self._type_search_request(response, "行政许可", kwargs.get("taxpayer_id"))

    def parse_response(self, response, **kwargs):
        """Handle page 1 of a typeSourceSearch result: emit its items,
        record the per-type total, and schedule the remaining pages.
        """
        punishment_status = kwargs.get("punishment_status")
        taxpayer_id = kwargs.get("taxpayer_id")
        status = kwargs.get("status")
        type_name = kwargs.get("type_name")
        company_name = kwargs.get("company_name")
        yield from self.parse_item(response, **kwargs)
        # NOTE(review): totalSize is used as the page count (pagination runs
        # to page totalSize) while total is logged as the record count —
        # confirm against the API contract.
        total_size = response.json()["data"]["totalSize"]
        total = response.json()["data"]["total"]
        self.count_dict.update({type_name: total})
        self.logger.info(f"获取到数据 {type_name} {taxpayer_id} total:{total} total_size:{total_size}")
        url = "https://public.creditchina.gov.cn/private-api/typeSourceSearch"
        params = urldecode(response.request.url)
        for page in range(2, total_size + 1):
            yield self.Request(
                url + "?" + urlencode({**params, "page": str(page)}),
                callback=self.parse_item,
                cb_kwargs=dict(
                    punishment_status=punishment_status,
                    status=status,
                    type_name=type_name,
                    company_name=company_name,
                    taxpayer_id=taxpayer_id,
                ),
            )

    def parse_item(self, response, **kwargs):
        """Dispatch one result page to the per-type item builder."""
        type_name = kwargs.get("type_name")
        datas = response.json()["data"].get("list", [])

        if type_name == "严重失信主体名单":
            yield from self._items_serious_dishonesty(datas, **kwargs)
        elif type_name == "经营异常":
            yield from self._items_business_anomaly(datas, **kwargs)
        elif type_name == "行政处罚":
            yield from self._items_admin_penalty(datas, **kwargs)
        elif type_name == "行政许可":
            yield from self._items_admin_license(datas, **kwargs)

    def _items_serious_dishonesty(self, datas, **kwargs):
        """Build items for the serious-dishonesty name lists; the record's
        ``table_name`` decides which item class it maps to.
        """
        punishment_status = kwargs.get("punishment_status")
        status = kwargs.get("status")
        taxpayer_id = kwargs.get("taxpayer_id")
        # Field mapping for dishonest judgment debtors (credit_zgf_fr_sxbzxr).
        # Direct indexing is intentional: a missing key fails the record loudly.
        mapping = {
            "name": "iname",
            "taxpayer_id": "cardnumber",
            "court_of_execution": "court_name",
            "bureau": "area_name",
            "docunumber": "gist_cid",
            "filing_time": "reg_date",
            "reference": "case_code",
            "unit": "gist_unit",
            "obligation": "duty",
            "performance_status": "performance",
            "dpecific_situation": "disreput_type_name",
            "release_time": "publish_date",
            "fulfilled_portion": "performed_part",
            "unfulfilled_portion": "unperform_part",
        }
        for detail in datas:
            data_source = detail.get("dataSource")
            table_name = detail.get("table_name")
            if table_name == "credit_czb_fr_zfcgblxwjl":
                # Government-procurement serious-violation record list.
                info_dict = detail["entity"]
                yield NetSeriousDishonestyCfcgNameListItem(**dict(
                    company_name=info_dict.get('gyshcgdljgmc'),
                    taxpayer_id=info_dict.get('tyshxydmhzzjgdm'),
                    address=info_dict.get('dz'),
                    cf_nr=info_dict.get('blxwdjtqk'),
                    cf_yj=info_dict.get('cfyj'),
                    cf_jg=info_dict.get('cfjg'),
                    cf_jdrq=info_dict.get('cfjlrq'),
                    department=info_dict.get('cfjldw'),
                    end_date=info_dict.get('cfjzrq'),
                    data_sources=data_source,
                    keyword=taxpayer_id,
                ))
            if table_name == "credit_yjglb_fr_aqscmd_2023":
                # Work-safety serious-dishonesty subject list.
                info_dict = detail["entity"]
                yield NetSeriousDishonestySafetyNameListItem(**dict(
                    company_name=info_dict.get('lrmdscjydwmc'),
                    taxpayer_id=info_dict.get('tyshxydm'),
                    address=info_dict.get('djzcdz'),
                    person_name=info_dict.get('lrmdryxm'),
                    id_cards=info_dict.get('yxsfzjhm'),
                    manger_date=info_dict.get('mdglqx'),
                    department=info_dict.get('zclrjdbm'),
                    data_sources=data_source,
                    keyword=taxpayer_id,
                ))
            if table_name == "credit_gjtjj_fr_tjsyzsxqy":
                # Statistics serious-dishonesty enterprise list.
                info_dict = detail["entity"]
                yield NetSeriousDishonestyNameListItem(**dict(
                    company_name=info_dict.get('qymc'),
                    taxpayer_id=info_dict.get('tyshxydm'),
                    bureau=info_dict.get('dz'),
                    legal_person=info_dict.get('fddbrfzrxm'),
                    cf_wsh=info_dict.get('xzcfjdswh'),
                    cf_ss=info_dict.get('wfss'),
                    cf_yj=info_dict.get('cfyj'),
                    cf_nr=info_dict.get('cfnr'),
                    cf_jdrq=info_dict.get('cfjdrq'),
                    sx_wsh=info_dict.get('tjyzsxrdjdswh'),
                    recognition_basis=info_dict.get('rdyj'),
                    accreditation_authority=info_dict.get('rdjg'),
                    sx_jdrq=info_dict.get('tjyzsxrdjdrq'),
                    publicity_period=info_dict.get('gsqx'),
                    current_operating_status=info_dict.get('qydqjyzt'),
                    notes=info_dict.get('bz'),
                    data_sources=data_source,
                    keyword=taxpayer_id,
                ))
            if table_name == 'credit_zgf_fr_sxbzxr':
                # Dishonest judgment debtor (legal person).
                data = {k: detail["entity"][v] for k, v in mapping.items()}
                data.update(
                    {
                        "tag": "失信惩戒对象" if punishment_status == "yes" else None,
                        "data_sources": data_source,
                        "business_status": status,
                        "keyword": taxpayer_id,
                    }
                )
                yield NetSeriousDishonestyItem(**data)

    def _items_business_anomaly(self, datas, **kwargs):
        """Build business-anomaly items."""
        taxpayer_id = kwargs.get("taxpayer_id")
        mapping = {
            "company_name": "entname",
            "taxpayer_id": "uniscid",
            "legal_person": "lerep",
            "pripid": "pripid",
            "regno": "regno",
            "specausename": "specausename",
            "abntime": "abntime",
            "decorgname": "decorgname",
        }
        for detail in datas:
            data_source = detail.get("dataSource")
            data = {k: detail["entity"][v] for k, v in mapping.items()}
            data.update({"data_sources": data_source, "keyword": taxpayer_id})
            yield NetBusinessAnomalyItem(**data)

    def _items_admin_penalty(self, datas, **kwargs):
        """Build administrative-penalty items; records without a penalty
        document number (cf_wsh) are skipped.
        """
        taxpayer_id = kwargs.get("taxpayer_id")
        company_name = kwargs.get("company_name")
        mapping = {
            "cf_wsh": "cf_wsh",
            "cf_cflb": "cf_cflb",
            "cf_jdrq": "cf_jdrq",
            "cf_nr": "cf_nr",
            "cf_nr_fk": "cf_nr_fk",
            "cf_nr_wfff": "cf_nr_wfff",
            "cf_nr_zkdx": "cf_nr_zkdx",
            "cf_wfxw": "cf_wfxw",
            "cf_sy": "cf_sy",
            "cf_yj": "cf_yj",
            "cf_cfjg": "cf_cfjg",
            "cf_cfjgdm": "cf_cfjgdm",
            "cf_sjlydm": "cf_sjlydm",
            "data_sources": "cf_sjly",
        }
        for detail in datas:
            if "cf_wsh" not in detail["entity"]:
                continue
            data = {k: detail["entity"][v] for k, v in mapping.items()}
            data.update(
                {
                    "company_name": company_name,
                    "taxpayer_id": taxpayer_id,
                    "keyword": taxpayer_id,
                }
            )
            yield NetAdministrativePenaltyItem(**data)

    def _items_admin_license(self, datas, **kwargs):
        """Build administrative-license items; only new-standard records
        (those carrying xk_xkws) are emitted, matching the original logic.
        """
        taxpayer_id = kwargs.get("taxpayer_id")
        company_name = kwargs.get("company_name")
        mapping = {
            "xk_wsh": "xk_wsh",
            "xk_wsmc": "xk_xkws",
            "xk_zsmc": "xk_xkzs",
            "xk_lx": "xk_xklb",
            "xk_bh": "xk_xkbh",
            "xk_jdrq": "xk_jdrq",
            "xk_yxqq": "xk_yxqz",
            "xk_yxqz": "xk_yxqzi",
            "xk_nr": "xk_nr",
            "xk_jg": "xk_xkjg",
            "xk_xydm": "xk_xkjgdm",
        }
        for detail in datas:
            if "xk_wsh" not in detail["entity"]:
                continue
            if "xk_xkws" not in detail["entity"]:
                # Old-standard record: not mapped, skip (original behavior).
                continue
            data = {k: detail["entity"].get(v) for k, v in mapping.items()}
            data.update(
                {
                    "company_name": company_name,
                    "taxpayer_id": taxpayer_id,
                    "keyword": taxpayer_id,
                }
            )
            yield NetAdministrativeLicenseItem(**data)


def run():
    """Launch the black_list_search spider through the scrapy CLI."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "black_list_search"])


if __name__ == "__main__":
    from argparse import ArgumentParser

    arg_parser = ArgumentParser(description="命令运行爬虫，此命令慎重修改")
    arg_parser.add_argument("--worker", type=int, default=10, help="进程数量")
    cli_args = arg_parser.parse_args()

    # Spawn one crawler process per requested worker.
    if cli_args.worker:
        run_mul(max_workers=cli_args.worker, target=run)