#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/2/28 14:51
# @Author  : 王凯
# @File    : patent_sw.py
# @Project : scrapy_spider
import json
import multiprocessing
from typing import Any, Iterable, Union

import scrapy
from scrapy import Request

from apps.patent.clean.clean_model import CleanPatentModel
from apps.patent.patent.items import (
    NetPatentLawStatusItem,
    NetPatentProUpdatePatentStatusItem,
    NetPatentProRowsUpdatePatentStatusItem,
)
from apps.patent.patent.spiders import PatentParser
from components.component.scrapy_redis_task.spiders import RedisTaskSpider
from utils.db.mysqldb import MysqlDB
from utils.tools import send_msg


class AutoFixSwSpider(RedisTaskSpider):
    """Redis-task spider that repairs patents whose ``law_status`` is NULL.

    ``add_task`` queues companies from ``net_patent_clean`` into Redis;
    ``make_request_from_data`` turns each queued company into SW list-query
    requests against epub.cnipa.gov.cn; ``end_callback`` re-runs the cleaning
    model for the company once its task finishes.
    """

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        self.to_db = MysqlDB()               # MySQL access helper
        self.clean_cls = CleanPatentModel()  # post-crawl cleaning model

    @staticmethod
    def _pub_type_code(pub_type_zh: str) -> str:
        """Map the Chinese patent-type label to the SW query type code.

        "外观" (design) -> "9", "实用" (utility model) -> "6",
        anything else (invention) -> "3".
        """
        if "外观" in pub_type_zh:
            return "9"
        if "实用" in pub_type_zh:
            return "6"
        return "3"

    def add_task(self):
        """Queue up to 10 companies with a missing law_status into Redis.

        Sends an alert message when no candidate rows remain.
        """
        sql = """
        select app_number, patent_type, applicant_name, taxpayer_id, company_name
        from net_patent_clean
        where law_status is null and  taxpayer_id != '' and valid = 1
        limit 10
        """
        datas = self.to_db.find(sql, to_json=True)
        if datas:
            for data in datas:
                if data['taxpayer_id']:
                    # Score 0: all tasks have equal priority in the sorted set.
                    self.server.zadd(self.redis_key, {json.dumps(data, ensure_ascii=False): 0})
        else:
            self.logger.info("没数据了")
            send_msg("专利 空法律状态修复 没数据了")

    def make_request_from_data(self, formatted_data: Union[str, dict]):
        """Yield one SW list-query request per un-fixed patent of a company.

        ``formatted_data`` is the task payload queued by :meth:`add_task`
        (deserialized to a dict by the task framework before reaching here).
        Note: the original annotation ``str or dict`` evaluated to just
        ``str``; corrected to ``Union[str, dict]``.
        """
        taxpayer_id = formatted_data.get("taxpayer_id")
        # NOTE(review): taxpayer_id is interpolated straight into SQL. It
        # originates from our own DB (add_task), but switch to a
        # parameterized query if MysqlDB supports it.
        sql = f"select app_number, patent_type, applicant_name from net_patent_clean where taxpayer_id = '{taxpayer_id}' and law_status is null"
        for row in self.to_db.find(sql, to_json=True):
            app_number = row["app_number"]
            pub_type_zh = row["patent_type"]
            # Renamed from ``data`` so it no longer shadows the row variable.
            form_data = {
                "sortField": "ggr_desc",
                "pageSize": "10",
                "pageNum": "1",
                "swInfo.SwType": "",
                "swInfo.PubType": "",
                "isFmsq": "false",
                "isXx": "false",
                "isWg": "false",
                "swInfo.An": app_number,
                "swInfo.SwPubdate": "",
                "swInfo.SwInfo": "",
                "trsSql": "",
                "__RequestVerificationToken": "",
            }
            yield scrapy.FormRequest(
                "http://epub.cnipa.gov.cn/SW/SwListQuery",
                formdata=form_data,
                callback=self.parse_detail_by_query,
                cb_kwargs=dict(
                    app_number=app_number,
                    patent_type=pub_type_zh,
                    applicant_name=row["applicant_name"],
                    pub_type=self._pub_type_code(pub_type_zh),
                ),
            )

    def end_callback(self, task_obj, *args, **kwargs):
        """After a task completes, re-run cleaning for its company."""
        if task_obj:
            self.clean_cls.run(task_obj.get("company_name"))

class PatentSw(AutoFixSwSpider, PatentParser):
    """Spider that fixes NULL ``law_status`` patents via cnipa.gov.cn SW queries."""

    name = "patent_sw"

    @classmethod
    def update_settings(cls, settings) -> None:
        """Overlay spider-specific settings onto the project settings.

        Routes all HTTP(S) downloads through the RS6 download handler and
        widens the allowed/retried HTTP status codes.
        """
        downloader_middlewares = settings.getdict("DOWNLOADER_MIDDLEWARES")
        download_handlers = settings.getdict("DOWNLOAD_HANDLERS")
        download_handlers.update({
            "http": "utils.rs_utils.rs_downloader_handler.RS6DownloadHandler",
            "https": "utils.rs_utils.rs_downloader_handler.RS6DownloadHandler",
        })
        new_settings = {
            **(cls.custom_settings or {}),
            # Fixed: 202 was listed twice in the original list.
            "HTTPERROR_ALLOWED_CODES": [400, 404, 500, 200, 202, 502, 503, 412],
            "RETRY_TIMES": 100,
            "RETRY_HTTP_CODES": [400, 404],
            "DOWNLOADER_MIDDLEWARES": downloader_middlewares,
            "DOWNLOAD_HANDLERS": download_handlers,
        }
        settings.setdict(new_settings, priority="spider")

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.to_db = MysqlDB()  # MySQL access helper

    def start_requests(self) -> Iterable[Request]:
        """Kick off SW list queries for one hard-coded company.

        NOTE(review): the taxpayer_id below is hard-coded (looks like a
        one-off/debug run); normal operation goes through the Redis task
        queue via ``make_request_from_data``. Confirm before productionizing.
        """
        sql = "select app_number, patent_type, applicant_name from net_patent_clean where taxpayer_id = '111302276610726425' and law_status is null"
        for row in self.to_db.find(sql, to_json=True):
            pub_type_zh = row["patent_type"]
            app_number = row["app_number"]
            # Map the Chinese patent-type label to the SW query type code.
            if "外观" in pub_type_zh:     # design
                pub_type = "9"
            elif "实用" in pub_type_zh:   # utility model
                pub_type = "6"
            else:                         # invention
                pub_type = "3"
            # Renamed from ``data`` so it no longer shadows the row variable.
            form_data = {
                "sortField": "ggr_desc",
                "pageSize": "10",
                "pageNum": "1",
                "swInfo.SwType": "",
                "swInfo.PubType": "",
                "isFmsq": "false",
                "isXx": "false",
                "isWg": "false",
                "swInfo.An": app_number,
                "swInfo.SwPubdate": "",
                "swInfo.SwInfo": "",
                "trsSql": "",
                "__RequestVerificationToken": "",
            }
            yield scrapy.FormRequest(
                "http://epub.cnipa.gov.cn/SW/SwListQuery",
                formdata=form_data,
                callback=self.parse_detail_by_query,
                cb_kwargs=dict(
                    app_number=app_number,
                    patent_type=pub_type_zh,
                    applicant_name=row["applicant_name"],
                    pub_type=pub_type,
                ),
            )

    def parse_detail_by_query(self, response, **kwargs):
        """Parse the SW result table and yield law-status items.

        Yields one ``NetPatentLawStatusItem`` per table row, follows each
        row's detail link via ``gen_detail_request_by_click``, then emits two
        update items carrying the latest (last-row) status for the patent.
        """
        rows = []
        app_number = kwargs["app_number"]
        patent_type = kwargs["patent_type"]
        applicant_name = kwargs["applicant_name"]
        for tr in response.xpath("//tr"):
            item = {
                "app_number": tr.xpath('string(./td[@class="name"])').get().strip(),
                "date": tr.xpath('string(./td[@class="num"])').get().strip(),
                "status": tr.xpath('string(./td[@class="title"])').get().strip(),
                # Detail id embedded in the first link's onclick handler.
                "uni": tr.xpath(".//a[1]/@onclick").re_first(r"\(\'(.*?)\'"),
            }
            if not item["app_number"]:
                continue  # header / empty rows carry no app number
            rows.append(item)
            yield NetPatentLawStatusItem(**item)
            yield from self.gen_detail_request_by_click(
                item["uni"],
                kwargs["pub_type"],
                "",
                app_number=app_number,
                patent_type=patent_type,
                applicant_name=applicant_name,
            )

        if rows:
            # Both update items share the same payload; build it once.
            latest = {
                "app_number": app_number,
                "patent_type": patent_type,
                "patent_status": rows[-1]["status"],
                "applicant_name": applicant_name,
            }
            yield NetPatentProUpdatePatentStatusItem(**latest)
            yield NetPatentProRowsUpdatePatentStatusItem(**latest)


def run():
    """Run the ``patent_sw`` spider in this process via Scrapy's CLI."""
    from scrapy import cmdline

    # Equivalent to: scrapy crawl patent_sw
    cmdline.execute(["scrapy", "crawl", "patent_sw"])


def run_mul(max_workers=5):
    """Launch ``max_workers`` crawler processes and wait for all of them.

    Each worker process executes :func:`run` (one full ``scrapy crawl``).
    ``close``/``join`` are kept explicit rather than using ``with Pool(...)``
    because ``Pool.__exit__`` calls ``terminate()``, which would kill the
    crawls instead of letting them finish.

    :param max_workers: number of parallel crawler processes.
    """
    pool = multiprocessing.Pool(max_workers)
    try:
        for _ in range(max_workers):
            pool.apply_async(run)
    finally:
        # Fixed: ensure the pool is closed/joined even if scheduling raises,
        # so worker processes are not leaked.
        pool.close()
        pool.join()


if __name__ == "__main__":
    from argparse import ArgumentParser

    # CLI entry point: spawn N crawler processes.
    arg_parser = ArgumentParser(description="命令运行专利爬虫，此命令慎重修改")
    arg_parser.add_argument("--worker", type=int, default=1, help="进程数量")
    cli_args = arg_parser.parse_args()

    if cli_args.worker:
        run_mul(max_workers=cli_args.worker)