#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/2/27 11:34
# @Author  : 王凯
# @File    : patent_week.py
# @Project : scrapy_spider
import sys
from pathlib import Path
from typing import Iterable, Any

import scrapy
from scrapy import Request
from scrapy.http import Response

sys.path.append(Path(__file__).parent.parent.parent.parent.parent.as_posix())
from apps.patent.patent.spiders import PatentParser
from apps.patent.patent.spiders.patent_search import CAT_MAPPING
from components.config import WFQ_SOURCE_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.tonghuashun_tools import PostgresqlDB


class PatentWeekReport(scrapy.Spider, PatentParser):
    """Weekly patent-gazette spider for http://epub.cnipa.gov.cn.

    Flow:
      1. ``start_requests`` fetches the site index.
      2. ``index_requests`` scrapes the latest gazette issue number
         (``ggr_begin``) and fires one paginated ``PageQuery`` search per
         publication type in ``CAT_MAPPING``.
      3. ``parse`` delegates item extraction to ``PatentParser.parse_page_list``
         and follows pagination via the site's search-after cursor.
    """

    name = "patent_week_report"
    # Connection handles; populated in __init__ (kept as class attrs so the
    # mixin PatentParser can reference them by name).
    pg_db = None
    wfq_source_db = None
    custom_settings = {
        # The site answers 202/412 while anti-bot checks run, so those codes
        # are both allowed through error handling and retried.
        "HTTPERROR_ALLOWED_CODES": [400, 404, 500, 200, 202, 502, 503, 412],
        "RETRY_TIMES": 100,
        "RETRY_HTTP_CODES": [400, 412, 202],
    }

    def __init__(self, **kwargs: Any):
        """Open the Postgres target and source-MySQL connections."""
        super().__init__(**kwargs)
        self.pg_db = PostgresqlDB()
        self.wfq_source_db = MysqlDB(
            ip=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_IP"],
            port=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_PORT"],
            db=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_DB"],
            user_name=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_NAME"],
            user_pass=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_PASS"],
        )

    @staticmethod
    def _build_form_data(
        pub_type: str,
        ggr_begin: str,
        page_num: str = "1",
        search_after: str = "",
    ) -> dict:
        """Build the ``PageQuery`` POST form.

        Only four fields ever vary between the first page and follow-up
        pages (``pub_type``, ``ggr_begin``, ``page_num``, ``search_after``);
        everything else is a fixed empty filter required by the endpoint.
        """
        return {
            "searchCatalogInfo.Pubtype": pub_type,
            "searchCatalogInfo.Ggr_Begin": ggr_begin,
            "searchCatalogInfo.Ggr_End": "",
            "searchCatalogInfo.Pd_Begin": "",
            "searchCatalogInfo.Pd_End": "",
            "searchCatalogInfo.An": "",
            "searchCatalogInfo.Pn": "",
            "searchCatalogInfo.Ad_Begin": "",
            "searchCatalogInfo.Ad_End": "",
            "searchCatalogInfo.E71_73": "",
            "searchCatalogInfo.E72": "",
            "searchCatalogInfo.Edz": "",
            "searchCatalogInfo.E51": "",
            "searchCatalogInfo.Ti": "",
            "searchCatalogInfo.Abs": "",
            "searchCatalogInfo.Edl": "",
            "searchCatalogInfo.E74": "",
            "searchCatalogInfo.E30": "",
            "searchCatalogInfo.E66": "",
            "searchCatalogInfo.E62": "",
            "searchCatalogInfo.E83": "",
            "searchCatalogInfo.E85": "",
            "searchCatalogInfo.E86": "",
            "searchCatalogInfo.E87": "",
            "pageModel.pageNum": page_num,
            "pageModel.pageSize": "10",
            "sortFiled": "ggr_desc",
            "searchAfter": search_after,
            "showModel": "1",
            "isOr": "False",
            "__RequestVerificationToken": "",
        }

    def start_requests(self) -> Iterable[Request]:
        """Entry point: fetch the gazette index page."""
        yield scrapy.Request("http://epub.cnipa.gov.cn", callback=self.index_requests, dont_filter=True)

    def index_requests(self, response, **kwargs):
        """Extract the latest issue number and fan out one search per type."""
        # The issue number is embedded in the onclick handler zl_Show('NNN...').
        ggr_begin = response.xpath("//a[contains(@onclick, 'zl_Show')]").re_first(r"zl_Show\('(\d+)")
        self.logger.info(f"正在爬取第【{ggr_begin}】期")
        if ggr_begin:
            url = "http://epub.cnipa.gov.cn/Dxb/PageQuery"
            for pub_type in CAT_MAPPING:
                yield scrapy.FormRequest(
                    url,
                    formdata=self._build_form_data(pub_type, ggr_begin),
                    callback=self.parse,
                    cb_kwargs=dict(pub_type=pub_type, ggr_begin=ggr_begin),
                    dont_filter=True,
                )
        else:
            self.logger.error(f"正在爬取第【{ggr_begin}】期 失败")

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Parse one result page and request the next one if any remain."""
        yield from self.parse_page_list(response, **kwargs)  # parse current list page

        url = "http://epub.cnipa.gov.cn/Dxb/PageQuery"
        total = response.xpath("//script").re_first(r"total_item:\s*(\d+)")
        pub_type = kwargs["pub_type"]
        ggr_begin = kwargs["ggr_begin"]
        current_page = response.xpath("//script").re_first(r"current_page:\s*(\d+)")

        # Search-after cursor for the next page, exposed as hidden inputs.
        last_an = response.xpath('//input[@id="lastAn"]/@value').get()
        last_ggr = response.xpath('//input[@id="lastGgr"]/@value').get()

        if total:
            self.logger.info(f"爬取[{kwargs['ggr_begin']}:{kwargs['pub_type']}]数量：{current_page}/{total}")
            next_page = int(current_page) + 1
            # NOTE(review): `total` is captured from `total_item`, i.e. it looks
            # like an item count rather than a page count, yet it is compared
            # against a page number — confirm against the site's JS. Behavior
            # preserved as-is.
            if next_page <= int(total):
                yield scrapy.FormRequest(
                    url,
                    formdata=self._build_form_data(
                        pub_type,
                        ggr_begin,
                        page_num=f"{next_page}",
                        search_after=f"{last_ggr};{last_an}",
                    ),
                    callback=self.parse,
                    cb_kwargs=dict(
                        pub_type=pub_type,
                        ggr_begin=ggr_begin,
                    ),
                )


if __name__ == "__main__":
    # Allow running this spider directly: python patent_week.py
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "patent_week_report"])
