#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/4/2 09:57
# @Author  : 王凯
# @File    : siku_search_spider.py
# @Project : scrapy_spider
import datetime
import hashlib
import json
import sys
import time
from pathlib import Path
from typing import Any

import scrapy
from scrapy.http import TextResponse

from components.config import WFQ_SOURCE_MYSQL_CONFIG

sys.path.append(Path(__file__).parent.parent.parent.parent.parent.as_posix())
from apps.siku.siku.items import NetMohurdDetailsItem, NetMohurdQualificationItem, NetMohurdPersonnelItem, NetMohurdProjectItem
from utils.tools import urlencode, run_mul
from components.component.scrapy_redis_task.spiders import RedisTaskSpider
from utils.db.mysqldb import MysqlDB


class RosterSikuSpider(RedisTaskSpider):
    """Base spider for the jzsc.mohurd.gov.cn ("四库") qualification source.

    Responsibilities visible here:

    * :meth:`add_task` — load company rows from the source MySQL table
      ``roster_mohurd`` in batches and push them as JSON tasks into the
      Redis sorted set ``self.redis_key``, tracking progress with an
      incremental id cursor stored in Redis.
    * :meth:`make_request_from_data` — turn one task dict into the initial
      search request against the mohurd company-list API.
    * :meth:`start_callback` / :meth:`end_callback` — write per-task
      bookkeeping rows into the ``net_mohurd_task_log`` MySQL table.
    """

    # md5 token identifying the task currently being crawled; written in
    # start_callback and read back in end_callback.
    # NOTE(review): this is shared instance state — if tasks are processed
    # concurrently the value can be overwritten between the two callbacks,
    # mismatching log rows. Confirm tasks are handled strictly one at a time.
    uk_id = None
    # destination MySQL connection (net_mohurd_* tables); created in __init__
    to_db = None
    custom_settings = {
        "REDIS_START_URLS_KEY": "scrapy:task:%(name)s:start_urls",
        "LOG_LEVEL": "INFO",
    }
    # when True, start/end bookkeeping rows are written to net_mohurd_task_log
    save_mysql_log = True
    # human-readable source tag stored with each task-log row
    source = "四库资质"

    def __init__(self, **kwargs: Any):
        """Open the destination DB and the source DB that holds roster_mohurd."""
        super().__init__(**kwargs)
        # default-configured destination connection
        self.to_db = MysqlDB()
        # source connection configured from WFQ_SOURCE_MYSQL_CONFIG
        self.wfq_source_db = MysqlDB(
            ip=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_IP"],
            port=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_PORT"],
            db=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_DB"],
            user_name=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_NAME"],
            user_pass=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_PASS"],
        )

    def add_task(self) -> None:
        """Load the next batch of companies from MySQL into the Redis task set.

        The last processed ``roster_mohurd.id`` is kept in the Redis key
        ``<redis_key>:id``. Each row yields one task per company name (current
        name plus every comma-separated former name), skipping names of 3
        characters or fewer. When the table is exhausted and the task set is
        empty, the cursor is reset to 0 and loading restarts from the top
        (i.e. the spider crawls the roster in endless rounds).
        """
        record_task_table = self.redis_key + ':id'
        last_task_id = self.server.get(record_task_table)
        batch = 100
        last_task_id = int(last_task_id) if last_task_id else 0
        # last_task_id is always an int here, so the f-string interpolation is
        # not attacker-controlled; still, parameterized queries would be safer.
        sql = (
            f"SELECT company_name as query_key, used_name, taxpayer_id, id from "
            f"roster_mohurd where id > {last_task_id} limit {batch}"
        )
        datas = self.wfq_source_db.find(sql, to_json=True)
        if datas:
            # NOTE: the logged "id从x到y" range is approximate — LIMIT does not
            # guarantee contiguous ids; the real cursor is set from max(id) below.
            self.logger.info(
                "数据加载完成,{}, id从{}到{} 数量 {}".format(self.redis_key, last_task_id, last_task_id + batch, len(datas))
            )
            for data in datas:
                query_key = data.get("query_key")
                taxpayer_id = data.get("taxpayer_id")
                # one task for the current name ...
                new_task_list = [{"query_key": query_key, "taxpayer_id": taxpayer_id}]

                # ... plus one task per comma-separated former company name
                used_name = data.get("used_name")
                if used_name:
                    for name in used_name.split(","):
                        new_task_list.append({"query_key": name, "taxpayer_id": taxpayer_id})

                for new_task in new_task_list:
                    # very short names are too ambiguous to search; skip them
                    if len(new_task.get("query_key")) > 3:
                        self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})
            # advance the cursor to the highest id actually fetched
            last_task_id = max([i["id"] for i in datas])
            self.server.set(record_task_table, str(last_task_id))
        else:
            self.logger.info(f"数据加载完成,没有数据,记录最后一条任务id {self.redis_key} {last_task_id}")
            self.server.set(record_task_table, last_task_id)
            # tasks still pending in Redis: wait for them before restarting
            if self.count_size(self.redis_key):
                return
            # time.sleep(60 * 5)
            # self.add_task()
            # table exhausted and queue drained: reset cursor and start a new round
            self.logger.info("数据加载完成,没有数据,准备下一轮")
            self.server.set(record_task_table, "0")
            self.add_task()

    def make_request_from_data(self, formatted_data: dict):
        """Build the initial company-search request for one task dict.

        Prefers ``taxpayer_id`` as the search key when it looks like a unified
        social credit code (starts with "91"); otherwise falls back to the
        company name in ``query_key``.
        NOTE(review): if both taxpayer_id and query_key are empty this raises
        on ``startswith`` — presumably add_task guarantees a non-empty key.
        """
        query_key = formatted_data["taxpayer_id"] or formatted_data["query_key"]
        if not query_key.startswith("91"):
            query_key = formatted_data["query_key"]
        url = "https://jzsc.mohurd.gov.cn/APi/webApi/dataservice/query/comp/list"
        params = {"complexname": query_key, "pg": "0", "pgsz": "15", "total": "0"}
        yield scrapy.Request(url=url + "?" + urlencode(params), callback=self.parse)

    def start_callback(self, task_obj, *args, **kwargs):
        """Record the crawl start of one task in net_mohurd_task_log.

        Generates a fresh md5 ``uk_id`` (query_key + current time + source) so
        repeated crawls of the same company get distinct log rows. Errors are
        logged and swallowed — logging must not kill the crawl.
        """
        if self.save_mysql_log:
            try:
                query_key = task_obj.get("query_key")
                taxpayer_id = task_obj.get("taxpayer_id")
                self.uk_id = hashlib.md5(f"{query_key}{time.time()}{self.source}".encode("utf-8")).hexdigest()
                self.to_db.add_batch_smart(
                    "net_mohurd_task_log",
                    [
                        {
                            "query_key": query_key,
                            "uk_id": self.uk_id,
                            "source": self.source,
                            "taxpayer_id": taxpayer_id,
                            "crawler_start": datetime.datetime.now(),
                        }
                    ],
                    update_columns=["crawler_start"],
                )
            except Exception as e:
                self.logger.error(f"spider start callback {e}")

    def end_callback(self, task_obj, *args, **kwargs):
        """Record the crawl end plus the number of qualification rows found.

        Counts rows in ``net_mohurd_qualification`` matching the taxpayer id
        and updates the log row identified by ``self.uk_id``.
        NOTE(review): taxpayer_id is interpolated into the SQL string — it
        comes from our own DB, but a parameterized query would be safer.
        """
        if task_obj:
            if self.save_mysql_log:
                try:
                    query_key = task_obj.get("query_key")
                    taxpayer_id = task_obj.get("taxpayer_id")
                    counts = self.to_db.find(
                        f"""select count(*) as count
                                from net_mohurd_qualification
                                where taxpayer_id = '{taxpayer_id}' or enterprise_id = '{taxpayer_id}'""",
                        to_json=True,
                    )
                    count = 0
                    if counts:
                        count = counts[0]["count"]

                    self.to_db.add_batch_smart(
                        "net_mohurd_task_log",
                        [
                            {
                                "query_key": query_key,
                                "uk_id": self.uk_id,
                                "crawler_end": datetime.datetime.now(),
                                "count": count,
                            }
                        ],
                        update_columns=["crawler_end", "count"],
                    )

                except Exception as e:
                    self.logger.error(f"spider end callback {e}")


class SiKuSearchSpider(RosterSikuSpider):
    """Search jzsc.mohurd.gov.cn by company name / credit code and scrape
    company details and qualification certificates.

    Crawl flow: search list -> company detail (``NetMohurdDetailsItem``) ->
    qualification pages (``NetMohurdQualificationItem``). Registered-staff and
    project callbacks are implemented but currently not scheduled (see the
    NOTE in :meth:`parse_company_detail`).
    """

    name = "siku_search"

    def _next_page_request(self, url, id_param, enterprise_id, taxpayer_id, result, callback):
        """Yield a request for the next result page, if one remains.

        ``result`` is a mohurd paging payload carrying ``total`` and
        ``pageNum``. Page indices appear 0-based (the first request uses
        pg=0 — TODO confirm) and the page size is fixed at 15, so the last
        page index is ``total // 15``.  ``id_param`` is the query-string
        name of the company-id parameter ("qyId" or "qy_id", the endpoints
        are inconsistent).
        """
        total_page = result["total"] // 15
        page_num = result["pageNum"]
        if page_num < total_page:
            params = {id_param: enterprise_id, "pg": page_num + 1, "pgsz": "15"}
            yield scrapy.Request(
                url=url + "?" + urlencode(params),
                callback=callback,
                cb_kwargs={
                    "enterprise_id": enterprise_id,
                    "taxpayer_id": taxpayer_id,
                },
            )

    def parse(self, response: TextResponse, **kwargs):
        """Parse the company search list and request each hit's detail page."""
        resp = response.json()
        # robustness: "data"/"list" may be null when the search has no hits
        datas = (resp.get("data") or {}).get("list") or []
        for data in datas:
            params = {"compId": data["QY_ID"]}
            url = "https://jzsc.mohurd.gov.cn/APi/webApi/dataservice/query/comp/compDetail"
            yield scrapy.Request(url=url + "?" + urlencode(params), callback=self.parse_company_detail)  # 企业详情

    def parse_company_detail(self, response, **kwargs):
        """Parse one company's detail payload.

        The response's ``data.compMap`` carries fields such as ``QY_ID``
        (mohurd enterprise id), ``QY_ORG_CODE`` (unified social credit code,
        e.g. ``91610800064834709T``), ``QY_NAME``, ``QY_FR_NAME`` (legal
        person), ``QY_ADDR``, ``QY_JJXZ`` (registration type) and
        ``REGION_FULLNAME_NEW`` (e.g. ``陕西省-榆林市-榆阳区``).

        Yields a :class:`NetMohurdDetailsItem` and a follow-up request for
        the company's qualification certificate list.

        :param response: company-detail JSON response
        :param kwargs: unused
        """
        resp = response.json()
        data = (resp.get("data") or {}).get("compMap")
        if not data:
            # robustness: detail endpoint occasionally returns an empty payload
            return
        enterprise_id = data["QY_ID"]
        taxpayer_id = data["QY_ORG_CODE"]
        detail_json = dict(
            enterprise_registration_location=data["REGION_FULLNAME_NEW"],
            enterprise_name=data["QY_NAME"],
            taxpayer_id=data["QY_ORG_CODE"],
            legal_person=data["QY_FR_NAME"],
            business_address=data["QY_ADDR"],
            registration_type=data["QY_JJXZ"],
            registered_territory=data["QY_REGION_NAME"],
            enterprise_id=data["QY_ID"],
        )
        yield NetMohurdDetailsItem(**detail_json)

        params = {"qyId": data["QY_ID"], "pg": "0", "pgsz": "15"}
        url = "https://jzsc.mohurd.gov.cn/APi/webApi/dataservice/query/comp/caDetailList"
        yield scrapy.Request(
            url=url + "?" + urlencode(params),
            callback=self.parse_certificate_detail,
            cb_kwargs={
                "enterprise_id": enterprise_id,
                "taxpayer_id": taxpayer_id,
            },
        )  # 企业资质资格

        # NOTE: registered-staff ("regStaffList", id param "qyId") and project
        # ("compPerformanceListSys", id param "qy_id") crawls are intentionally
        # disabled. Re-enable by seeding pg=0 requests here with callbacks
        # parse_register_detail / parse_project_detail, mirroring the
        # qualification request above.

    def parse_project_detail(self, response, **kwargs):
        """Parse one page of a company's project list and paginate.

        Yields :class:`NetMohurdProjectItem` per project; the project
        territory is assembled as ``province[-city[-county]]``.
        """
        enterprise_id = kwargs.get("enterprise_id")
        taxpayer_id = kwargs.get("taxpayer_id")
        url = "https://jzsc.mohurd.gov.cn/APi/webApi/dataservice/query/comp/compPerformanceListSys"
        resp = response.json()
        result = resp.get("data")
        if not result:
            return
        yield from self._next_page_request(
            url, "qy_id", enterprise_id, taxpayer_id, result, self.parse_project_detail
        )  # 工程项目

        for data in result["list"] or []:
            province = data["PROVINCE"] if data["PROVINCE"] else ""
            city = "-" + data["CITY"] if data["CITY"] else ""
            county = "-" + data["COUNTY"] if data["COUNTY"] else ""
            project_id = data["ID"]
            project_json = dict(
                project_id=project_id,
                enterprise_id=enterprise_id,
                taxpayer_id=taxpayer_id,
                project_no=data["PRJNUM"],
                project_name=data["PRJNAME"],
                project_territory=province + city + county,
                project_category=data["PRJTYPENUM"],
                project_unit=data["BUILDCORPNAME"],
            )
            yield NetMohurdProjectItem(**project_json)

    def parse_register_detail(self, response, **kwargs):
        """Parse one page of a company's registered personnel and paginate.

        Yields :class:`NetMohurdPersonnelItem` per person.
        """
        enterprise_id = kwargs.get("enterprise_id")
        taxpayer_id = kwargs.get("taxpayer_id")
        url = "https://jzsc.mohurd.gov.cn/APi/webApi/dataservice/query/comp/regStaffList"
        resp = response.json()
        # robustness: guard null data/pageList (previously raised TypeError)
        result = (resp.get("data") or {}).get("pageList")
        if not result:
            return
        yield from self._next_page_request(
            url, "qyId", enterprise_id, taxpayer_id, result, self.parse_register_detail
        )  # 注册人员

        for data in result["list"] or []:
            register_json = dict(
                enterprise_id=enterprise_id,
                taxpayer_id=taxpayer_id,
                enrollment=data["RN"],
                name=data["RY_NAME"],
                id_num=data["IDCARD"],
                registration_category=data["REG_PROF_NAME"] or '',
                registration_no=data["REG_SEAL_CODE"],
                registration_major=data["REG_TYPE_NAME"],
            )
            yield NetMohurdPersonnelItem(**register_json)

    def parse_certificate_detail(self, response, **kwargs):
        """Parse one page of a company's qualification certificates and paginate.

        Yields :class:`NetMohurdQualificationItem` per certificate; the
        issue / validity dates arrive as epoch milliseconds and are formatted
        as ``YYYY-MM-DD`` (local time), or None when absent.
        """
        enterprise_id = kwargs.get("enterprise_id")
        taxpayer_id = kwargs.get("taxpayer_id")
        url = "https://jzsc.mohurd.gov.cn/APi/webApi/dataservice/query/comp/caDetailList"
        resp = response.json()
        # robustness: guard null data/pageList (previously raised TypeError)
        result = (resp.get("data") or {}).get("pageList")
        if not result:
            return
        yield from self._next_page_request(
            url, "qyId", enterprise_id, taxpayer_id, result, self.parse_certificate_detail
        )  # 企业资质资格

        for data in result["list"] or []:
            certificate_json = dict(
                enterprise_id=enterprise_id,
                taxpayer_id=taxpayer_id,
                qualification_category=data["APT_TYPE_NAME"],
                qualification_certificate_number=data["APT_CERTNO"],
                qualifications_name=data["APT_NAME"],
                date_issuance=datetime.datetime.fromtimestamp(data["APT_GET_DATE"] / 1000).strftime("%Y-%m-%d") if data["APT_GET_DATE"] else None,
                validity_date=datetime.datetime.fromtimestamp(data["APT_EDATE"] / 1000).strftime("%Y-%m-%d") if data["APT_EDATE"] else None,
                authority_issuing=data["APT_GRANT_UNIT"],
            )
            yield NetMohurdQualificationItem(**certificate_json)


def run():
    """Launch the ``siku_search`` spider through the Scrapy CLI (blocking)."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "siku_search"])


if __name__ == "__main__":
    from argparse import ArgumentParser

    # CLI entry point: fan the crawler out over N worker processes.
    arg_parser = ArgumentParser(description="命令运行爬虫，此命令慎重修改")
    arg_parser.add_argument("--worker", type=int, default=1, help="进程数量")
    cli_args = arg_parser.parse_args()

    # --worker 0 disables the run entirely
    if cli_args.worker:
        run_mul(max_workers=cli_args.worker, target=run)