#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/2/26 13:58
# @Author  : 王凯
# @File    : patent_search.py
# @Project : scrapy_spider
import datetime
import hashlib
import json
import multiprocessing
import sys
import time
from pathlib import Path
from queue import Queue
from typing import Any

import scrapy
from scrapy.http import Response
from scrapy.settings import BaseSettings

# Make project-local packages importable BEFORE any apps/components/utils
# import (the original appended the path only after the first ``apps`` import,
# which worked only when the interpreter's cwd already covered the project root).
sys.path.append(Path(__file__).parent.parent.parent.parent.parent.as_posix())

from apps.patent.clean.clean_model import CleanPatentModel
from apps.patent.patent.items import NetPatentProRowsItem
from apps.patent.patent.spiders import PatentParser
from components.component.scrapy_redis_task.spiders import RedisTaskSpider
from components.config import WFQ_SOURCE_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.tonghuashun_tools import PostgresqlDB

# CNIPA publication-type code -> Chinese label; the code is posted as
# "searchCatalogInfo.Pubtype" to the epub.cnipa.gov.cn search form.
# NOTE(review): codes 5, 7 and 8 are absent — presumably unused by the site;
# confirm against the search form's options.
CAT_MAPPING = {
    "1": "发明公布",
    "2": "发明公布更正",
    "3": "发明授权",
    "4": "发明授权更正",
    "6": "实用新型",
    "9": "外观设计",
    "10": "外观设计更正",
}


class RosterPatentSpider(RedisTaskSpider):
    """Crawl CNIPA (epub.cnipa.gov.cn) patent publications for every company
    in the ``roster_patent`` MySQL table.

    Tasks are pulled in id windows from the source MySQL, pushed into a Redis
    sorted set, and crawl progress per company is logged into
    ``net_patent_task_log``.
    """

    # When True, start_callback/end_callback write progress rows to MySQL.
    save_mysql_log = True
    # md5 token tying a task's start-log row to its end-log row.
    uk_id = None
    to_db = None
    wfq_source_db = None
    custom_settings = {
        "REDIS_START_URLS_KEY": "scrapy:task:tyc:%(name)s:start_urls",
        "LOG_LEVEL": "INFO",
        "RETRY_TIMES": 100,
    }

    def __init__(self, **kwargs: Any):
        """Open the result MySQL connection, the task-source MySQL connection
        and the cleaning helper."""
        super().__init__(**kwargs)
        self.to_db = MysqlDB()
        self.wfq_source_db = MysqlDB(
            ip=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_IP"],
            port=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_PORT"],
            db=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_DB"],
            user_name=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_NAME"],
            user_pass=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_PASS"],
        )
        self.clean_cls = CleanPatentModel()

    def add_task(self):
        """Load the next id window of ``roster_patent`` rows and push one
        Redis task per (current or historical) company name.

        The last processed id is kept in Redis under ``<redis_key>:id``.

        BUG FIX: the original used strict bounds (``id > last and
        id < last + batch``; first window ``id < batch``) while advancing the
        cursor by ``batch``, so every row whose id is an exact multiple of
        ``batch`` was silently skipped.  Inclusive upper bounds close the gap;
        a single query now covers both the first and subsequent windows.
        """
        record_task_table = self.redis_key + ":id"
        last_task_id = self.server.get(record_task_table)
        batch = 1000
        last_task_id = int(last_task_id) if last_task_id else 0
        # Only integers are interpolated here, so the f-string SQL is safe.
        sql = (
            f"SELECT company_name as query_key, used_name, taxpayer_id from "
            f"roster_patent where id > {last_task_id} and id <= {last_task_id + batch}"
        )
        datas = self.wfq_source_db.find(sql, to_json=True)
        if datas:
            self.logger.info(
                "数据加载完成,{}, id从{}到{} 数量 {}".format(
                    self.redis_key, last_task_id, last_task_id + batch, len(datas)
                )
            )
            for data in datas:

                query_key = data.get("query_key")
                taxpayer_id = data.get("taxpayer_id")
                new_task_list = [{"query_key": query_key, "taxpayer_id": taxpayer_id}]

                # Historical company names get their own search tasks too.
                used_name = data.get("used_name")
                if used_name:
                    for name in used_name.split(","):
                        new_task_list.append({"query_key": name, "taxpayer_id": taxpayer_id})

                for new_task in new_task_list:
                    # Guard against NULL names (original crashed on len(None));
                    # very short names are too ambiguous to search (rule: > 3 chars).
                    task_name = new_task.get("query_key")
                    if task_name and len(task_name) > 3:
                        self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): -2})

            self.server.set(record_task_table, last_task_id + batch)
        else:
            self.logger.info(f"数据加载完成,没有数据,记录最后一条任务id {self.redis_key} {last_task_id}")
            self.server.set(record_task_table, last_task_id)

    def make_request_from_data(self, formatted_data: dict):
        """Yield one search POST per (company name, publication type).

        ``formatted_data`` is the decoded Redis task dict; names joined by a
        Chinese or ASCII comma are searched individually.  (The original
        annotation ``str or dict`` evaluated to just ``str`` — fixed.)
        """
        url = "http://epub.cnipa.gov.cn/Dxb/PageQuery"
        company_name = formatted_data["query_key"]
        if "，" in company_name:
            company_name_list = company_name.split("，")
        elif "," in company_name:
            company_name_list = company_name.split(",")
        else:
            company_name_list = [company_name]
        for company_name in company_name_list:
            for pub_type, pub_value in CAT_MAPPING.items():
                # The name is placed into every free-text field with isOr=True,
                # so a hit in any of them matches.
                data = {
                    "searchCatalogInfo.Pubtype": pub_type,
                    "searchCatalogInfo.Ggr_Begin": "",
                    "searchCatalogInfo.Ggr_End": "",
                    "searchCatalogInfo.Pd_Begin": "",
                    "searchCatalogInfo.Pd_End": "",
                    "searchCatalogInfo.An": "",
                    "searchCatalogInfo.Pn": "",
                    "searchCatalogInfo.Ad_Begin": "",
                    "searchCatalogInfo.Ad_End": "",
                    "searchCatalogInfo.E71_73": f"{company_name}",
                    "searchCatalogInfo.E72": f"{company_name}",
                    "searchCatalogInfo.Edz": f"{company_name}",
                    "searchCatalogInfo.E51": "",
                    "searchCatalogInfo.Ti": f"{company_name}",
                    "searchCatalogInfo.Abs": f"{company_name}",
                    "searchCatalogInfo.Edl": f"{company_name}",
                    "searchCatalogInfo.E74": f"{company_name}",
                    "searchCatalogInfo.E30": "",
                    "searchCatalogInfo.E66": "",
                    "searchCatalogInfo.E62": "",
                    "searchCatalogInfo.E83": "",
                    "searchCatalogInfo.E85": "",
                    "searchCatalogInfo.E86": "",
                    "searchCatalogInfo.E87": "",
                    "pageModel.pageNum": "1",
                    "pageModel.pageSize": "10",
                    "sortFiled": "ggr_desc",
                    "searchAfter": "",
                    "showModel": "1",
                    "isOr": "True",
                    "__RequestVerificationToken": "",
                }
                yield scrapy.FormRequest(
                    url,
                    formdata=data,
                    callback=self.parse,
                    cb_kwargs=dict(pub_type=pub_type, company_name=company_name),
                )

    def start_callback(self, task_obj, *args, **kwargs):
        """Record the crawl start of one task in ``net_patent_task_log`` and
        mint the ``uk_id`` that its end-log row will reuse."""
        if self.save_mysql_log:
            try:
                query_key = task_obj.get("query_key")
                taxpayer_id = task_obj.get("taxpayer_id")
                self.uk_id = hashlib.md5(f"{query_key}{time.time()}".encode("utf-8")).hexdigest()
                self.to_db.add_batch_smart(
                    "net_patent_task_log",
                    [
                        {
                            "query_key": query_key,
                            "uk_id": self.uk_id,
                            "taxpayer_id": taxpayer_id,
                            "crawler_start": datetime.datetime.now(),
                        }
                    ],
                    update_columns=["crawler_start"],
                )
            except Exception as e:
                # Best-effort logging: a failed log row must not kill the crawl.
                self.logger.error(f"spider start callback {e}")

    def end_callback(self, task_obj, *args, **kwargs):
        """Record crawl completion plus the distinct-patent count, then run
        the cleaning step for the crawled company.

        BUG FIX: the original called ``task_obj.get(...)`` for the cleaning
        step OUTSIDE the ``if task_obj`` guard, raising AttributeError
        whenever the task object was missing.
        """
        if not task_obj:
            return
        if self.save_mysql_log:
            try:
                query_key = task_obj.get("query_key")
                # Company names come from external data — escape single quotes
                # before interpolating them into the SQL string.
                safe_key = query_key.replace("'", "''") if query_key else query_key
                counts = self.to_db.find(
                    f"""select count(distinct app_number) as count from {NetPatentProRowsItem().table_name} where applicant_name_one = '{safe_key}'""",
                    to_json=True,
                )
                count = 0
                if counts:
                    count = counts[0]["count"]

                self.to_db.add_batch_smart(
                    "net_patent_task_log",
                    [
                        {
                            "query_key": query_key,
                            "uk_id": self.uk_id,
                            "crawler_end": datetime.datetime.now(),
                            "count": count,
                        }
                    ],
                    update_columns=["crawler_end", "count"],
                )

            except Exception as e:
                self.logger.error(f"spider end callback {e}")
        self.clean_cls.run(task_obj.get("query_key"))


class THSQueueSpider(RosterPatentSpider):
    """Variant of ``RosterPatentSpider`` that loads company tasks from a
    Postgres ``enterprise_basic_info`` table instead of MySQL."""

    pg_db = None

    def __init__(self, **kwargs: Any):
        """Open the Postgres task source on top of the parent's connections."""
        super().__init__(**kwargs)
        self.pg_db = PostgresqlDB()

    def add_task(self):
        """Load the next batch of valid companies after the stored cursor id
        and enqueue one Redis task per (current or historical) name.

        BUG FIXES vs the original:
        * the zero-cursor branch used a plain string ``"... limit {batch}"``
          (not an f-string), sending a literal ``{batch}`` to Postgres;
        * ``datas[-1]["seq"]`` was read although ``seq`` was never part of
          the projection — ``id as seq`` is now selected;
        * ``limit`` without ``order by`` returns rows in no guaranteed order,
          so the id cursor could skip rows — results are ordered by id now.
        """
        record_task_table = self.redis_key + ":ths:id"
        last_task_id = self.server.get(record_task_table)
        batch = 1000
        last_task_id = int(last_task_id) if last_task_id else 0
        # One query serves both the first batch (cursor 0) and later ones.
        sql = (
            f"SELECT id as seq, corp_name as query_key, used_name, "
            f"unified_social_credit_code as taxpayer_id from enterprise_basic_info "
            f"where id > {last_task_id} and isvalid = 1 order by id limit {batch}"
        )
        datas = self.pg_db.find(sql, to_json=True)
        if datas:
            # Advance the cursor to the last id actually returned.
            last_task_id_next = datas[-1]["seq"]
            self.logger.info(
                "数据加载完成,{}, id从{}到{} 数量 {}".format(
                    record_task_table, last_task_id, last_task_id_next, len(datas)
                )
            )
            for data in datas:

                query_key = data.get("query_key")
                taxpayer_id = data.get("taxpayer_id")
                new_task_list = [{"query_key": query_key, "taxpayer_id": taxpayer_id}]

                # Historical company names get their own search tasks too.
                used_name = data.get("used_name")
                if used_name:
                    for name in used_name.split(","):
                        new_task_list.append({"query_key": name, "taxpayer_id": taxpayer_id})

                for new_task in new_task_list:
                    self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})

            self.server.set(record_task_table, last_task_id_next)
        else:
            self.logger.info(f"数据加载完成,没有数据,记录最后一条任务id {record_task_table} {last_task_id}")
            self.server.set(record_task_table, last_task_id)


class PatentSearchSpider(RosterPatentSpider, PatentParser):
    """Concrete CNIPA search spider: issues the per-publication-type search
    POSTs and paginates through every result page."""

    name = "patent_search"
    # One-slot hand-off queue for an externally injected task.
    local_task_queue = Queue(maxsize=1)
    # When True, parse() schedules all remaining result pages itself.
    auto_next = True

    @classmethod
    def update_settings(cls, settings: BaseSettings) -> None:
        """Merge spider-specific settings: route all downloads through the
        RS6 download handler and widen allowed/retried HTTP status codes."""
        downloader_middlewares = settings.getdict("DOWNLOADER_MIDDLEWARES")
        download_handlers = settings.getdict("DOWNLOAD_HANDLERS")
        download_handlers.update({
            "http": "utils.rs_utils.rs_downloader_handler.RS6DownloadHandler",
            "https": "utils.rs_utils.rs_downloader_handler.RS6DownloadHandler",
        })
        new_settings = {
            **(cls.custom_settings or {}),
            # 202 was listed twice in the original; duplicate removed
            # (membership semantics are unchanged).
            "HTTPERROR_ALLOWED_CODES": [400, 404, 500, 200, 202, 502, 503, 412],
            "RETRY_TIMES": 100,
            "RETRY_HTTP_CODES": [400, 404],
            "DOWNLOADER_MIDDLEWARES": downloader_middlewares,
            "DOWNLOAD_HANDLERS": download_handlers,
        }
        settings.setdict(new_settings, priority="spider")

    def start_requests(self):
        """Kick off one search per publication type, using either an injected
        task from ``local_task_queue`` or a fixed debug company name."""
        if self.local_task_queue.full():
            obj = self.local_task_queue.get_nowait()
            company_name = obj.get("query_key")
        else:
            company_name = "青岛环秀涂料有限公司"
        url = "http://epub.cnipa.gov.cn/Dxb/PageQuery"
        for pub_type, v in CAT_MAPPING.items():
            data = self._gen_search_data(page=1, company_name=company_name, pub_type=pub_type)
            yield scrapy.FormRequest(
                url, formdata=data, callback=self.parse, cb_kwargs=dict(pub_type=pub_type, company_name=company_name)
            )

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Parse the first result page: emit its rows and, when ``auto_next``
        is set, schedule requests for the remaining pages."""
        url = "http://epub.cnipa.gov.cn/Dxb/PageQuery"
        total = response.xpath("//script").re_first(r"total_item:\s*(\d+)")
        if total:
            self.logger.info(f"爬取[{kwargs['company_name']}:{kwargs['pub_type']}]数量：{total}")
            data = self._gen_search_data(page=1, company_name=kwargs["company_name"], pub_type=kwargs["pub_type"])

            yield from self.parse_page_list(response, **kwargs)

            if self.auto_next is True:
                # BUG FIX: ``total_item`` is the number of hits while requests
                # go out per page of 10 — the original looped over
                # range(2, total + 1) and requested ~10x too many pages.
                # NOTE(review): confirm total_item is a hit count, not a page count.
                total_pages = (int(total) + 9) // 10  # ceil(total / pageSize=10)
                for i in range(2, total_pages + 1):
                    self.logger.info(f"[{kwargs['company_name']}:{kwargs['pub_type']}] 正在爬取第{i}页")
                    yield scrapy.FormRequest(
                        url,
                        formdata={**data, **{"pageModel.pageNum": f"{i}", "pageModel.pageSize": "10"}},
                        callback=self.parse_page_list,
                        cb_kwargs=dict(pub_type=kwargs["pub_type"], company_name=kwargs["company_name"]),
                    )
        else:
            self.logger.warning(f"爬取[{kwargs['company_name']}:{kwargs['pub_type']}]未找到记录：{response}")


def run():
    """Launch the ``patent_search`` spider in-process via the scrapy CLI."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "patent_search"])


def run_mul(max_workers=5):
    """Run :func:`run` in ``max_workers`` separate processes and wait for all
    of them to finish."""
    with multiprocessing.Pool(max_workers) as worker_pool:
        for _ in range(max_workers):
            worker_pool.apply_async(run)
        worker_pool.close()
        worker_pool.join()


if __name__ == "__main__":
    from argparse import ArgumentParser

    # CLI entry point: start N crawler processes.
    arg_parser = ArgumentParser(description="命令运行专利爬虫，此命令慎重修改")
    arg_parser.add_argument("--worker", type=int, default=1, help="进程数量")
    cli_args = arg_parser.parse_args()

    if cli_args.worker:
        run_mul(max_workers=cli_args.worker)
