#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/4/3 15:38
# @Author  : 王凯
# @File    : siku_company_spider.py
# @Project : scrapy_spider
import json
import multiprocessing
import sys
from pathlib import Path
from typing import Iterable, Any

import scrapy
from scrapy import Request
from scrapy.http import TextResponse
from scrapy.settings import BaseSettings

from components.config import WFQ_SOURCE_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB

sys.path.append(Path(__file__).parent.parent.parent.parent.parent.as_posix())
from apps.siku.siku.items import NetMohurdDetailsItem
from components.component.scrapy_redis_custom.spiders import RedisSpider
from components.settings.private.wfq_dev_redis_settings import REDIS_URL
from utils.tools import urlencode


class SiKuCompanySpider(RedisSpider):
    """Spider for company listings on jzsc.mohurd.gov.cn (四库 qualification platform).

    Enumerates every (aptitude code, city region) pair, queries the company
    list API in default order and again ordered by time (the site only serves
    the first 450 rows per query, so the time ordering surfaces extra rows),
    pages through the results, emits ``NetMohurdDetailsItem`` items, and
    mirrors a summary row per company into the ``roster_mohurd`` MySQL table.
    """

    name = "siku_company"

    # Company-list API endpoint shared by start_requests() and parse().
    _LIST_URL = "https://jzsc.mohurd.gov.cn/APi/webApi/dataservice/query/comp/list"

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        base_dir = Path(__file__).parent
        # Aptitude (资质) codes and city/region codes drive the query matrix
        # built in start_requests().
        with open(base_dir / "apt_codes.json", "r", encoding="utf-8") as f:
            self.apt_codes = json.load(f)
        with open(base_dir / "city_codes.json", "r", encoding="utf-8") as f:
            self.city_codes = json.load(f)
        # MySQL sink used by parse() to persist the company roster rows.
        self.wfq_source_db = MysqlDB(
            ip=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_IP"],
            port=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_PORT"],
            db=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_DB"],
            user_name=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_NAME"],
            user_pass=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_PASS"],
        )

    @classmethod
    def update_settings(cls, settings: BaseSettings) -> None:
        """Install redis-backed scheduling/dedup and custom download middlewares.

        Merges on top of (rather than replacing) the project's existing
        DOWNLOADER_MIDDLEWARES, then applies ``custom_settings`` at spider
        priority.
        """
        downloader_middlewares = settings.getdict("DOWNLOADER_MIDDLEWARES")
        new_settings = {
            **(cls.custom_settings or {}),
            "SCHEDULER": "components.component.scrapy_redis_custom.scheduler.Scheduler",
            "DUPEFILTER_CLASS": "components.component.scrapy_redis_custom.dupefilter.RFPDupeFilter",
            "SCHEDULER_PERSIST": True,
            "REDIS_URL": REDIS_URL,
            "DOWNLOAD_TIMEOUT": 30,
            "DOWNLOADER_MIDDLEWARES": {
                **downloader_middlewares,
                # NOTE(review): both middlewares share priority 500 — their
                # relative order is unspecified; confirm that is intended.
                "components.middlewares.downloadmiddlewares.public.company_ip_by_api.CompanyIpByApiMiddleware": 500,
                "components.middlewares.downloadmiddlewares.public.useragent_random.RandomUserAgentMiddleware": 500,
            },
        }
        settings.setdict(new_settings, priority="spider")

    def _list_request(
        self,
        qy_region: Any,
        apt_code: Any,
        qy_type: Any,
        pg: Any,
        orderby: str | None = None,
    ) -> Request:
        """Build one company-list request, carrying its query context in cb_kwargs.

        Keeping qy_region/apt_code/qy_type in cb_kwargs lets parse() paginate
        the same data slice instead of losing the filters after page 0.
        """
        params = {
            "pg": str(pg),
            "pgsz": "15",
            "total": "0",
            "qy_region": qy_region,
            "apt_code": apt_code,
            "qy_type": qy_type,
        }
        if orderby:
            params["orderby"] = orderby
        return scrapy.Request(
            url=self._LIST_URL + "?" + urlencode(params),
            callback=self.parse,
            cb_kwargs={
                "qy_region": qy_region,
                "apt_code": apt_code,
                "qy_type": qy_type,
            },
        )

    def start_requests(self) -> Iterable[Request]:
        """Yield a default-order and a time-ordered first-page query per (apt, city)."""
        for apt in self.apt_codes:
            for city in self.city_codes:
                qy_region = city["region_id"]
                apt_code = apt["APT_CODE"]
                qy_type = apt["APT_TYPE"]
                yield self._list_request(qy_region, apt_code, qy_type, pg="0")
                yield self._list_request(qy_region, apt_code, qy_type, pg="0", orderby="time")

    def parse(self, response: TextResponse, **kwargs: Any) -> Any:
        """Parse one page of the company list; paginate, emit items, persist rows.

        kwargs (from cb_kwargs) carry qy_region / apt_code / qy_type so that
        pagination keeps querying the same filtered slice.
        """
        qy_region = kwargs.get("qy_region")
        apt_code = kwargs.get("apt_code")
        qy_type = kwargs.get("qy_type")
        resp = response.json()
        # Page size is fixed at 15; total // 15 is the last reachable page index.
        total_page = resp["data"]["total"] // 15
        page_num = resp["data"]["pageNum"]
        datas = resp["data"]["list"]

        # Time-ordered queries only ever fetch one page, so don't paginate them.
        if "orderby" not in response.request.url:
            if page_num < total_page:
                if page_num + 1 < 30:
                    # 只展示前 450 条 — the site caps plain paging at 30 pages.
                    # BUG FIX: the original follow-up requests omitted cb_kwargs,
                    # so deeper pages saw qy_region/apt_code as None; they also
                    # dropped qy_type from the query, changing the result slice.
                    yield self._list_request(qy_region, apt_code, qy_type, pg=page_num + 1)
                else:
                    # 倒序按时间查只能查一页 — past the cap, fall back to a single
                    # time-ordered query to pick up the newest rows.
                    yield self._list_request(qy_region, apt_code, qy_type, pg="0", orderby="time")

        for data in datas:
            yield NetMohurdDetailsItem(
                enterprise_name=data["QY_NAME"],
                enterprise_id=data["QY_ID"],
                taxpayer_id=data["QY_ORG_CODE"],
                legal_person=data["QY_FR_NAME"],
                registered_territory=data["QY_REGION_NAME"],
            )
        if datas:
            # NOTE(review): the roster row stores QY_ID as taxpayer_id while the
            # item above uses QY_ORG_CODE — confirm which field is intended.
            self.wfq_source_db.add_batch_smart(
                "roster_mohurd",
                [
                    {"company_name": i["QY_NAME"], "taxpayer_id": i["QY_ID"], "source": "四库资质"}
                    for i in datas
                ],
            )


def run():
    """Launch the siku_company spider in the current process via Scrapy's CLI."""
    from scrapy import cmdline

    # Equivalent to running `scrapy crawl siku_company` from the shell.
    cmdline.execute(["scrapy", "crawl", "siku_company"])


def run_mul(max_workers=5):
    """Run the spider in ``max_workers`` parallel worker processes.

    Each worker invokes :func:`run` once; results are fire-and-forget and
    ``join()`` blocks until every crawl process exits.
    """
    worker_pool = multiprocessing.Pool(max_workers)
    for _ in range(max_workers):
        worker_pool.apply_async(run)
    worker_pool.close()
    worker_pool.join()


if __name__ == "__main__":
    from argparse import ArgumentParser

    # CLI entry point: --worker controls how many crawl processes to spawn.
    arg_parser = ArgumentParser(description="命令运行爬虫，此命令慎重修改")
    arg_parser.add_argument("--worker", type=int, default=1, help="进程数量")
    cli_args = arg_parser.parse_args()

    if cli_args.worker:
        run_mul(max_workers=cli_args.worker)