#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/1/30 15:10
# @Author  : 王凯
# @File    : company_ip_by_api.py
# @Project : scrapy_spider
from logging import Logger
from typing import Optional, Type, Union

import requests
import twisted.internet.error
import twisted.web._newclient  # required: process_exception references twisted.web._newclient
from scrapy import Request, Spider
from scrapy.downloadermiddlewares.retry import RetryMiddleware, retry_logger
from scrapy.utils.python import global_object_name

from utils.proxies_tools import get_company_ip_crawler_by_api

# Retry "reasons" that are routine and should NOT be logged by
# get_retry_request's debug path (e.g. the first-time proxy assignment
# performed in CompanyIpByApiMiddleware).
NOT_LOGGER_REASON = [
    "init_proxies",
]


def get_retry_request(
        request: Request,
        *,
        spider: Spider,
        reason: Union[str, Exception, Type[Exception]] = "unspecified",
        max_retry_times: Optional[int] = None,
        priority_adjust: Optional[int] = None,
        logger: Logger = retry_logger,
        stats_base_key: str = "retry",
):
    """Build a retry copy of *request*, or ``None`` when retries are exhausted.

    This is a local variant of ``scrapy.downloadermiddlewares.retry.
    get_retry_request`` whose only difference is that reasons listed in
    ``NOT_LOGGER_REASON`` are not logged on the retry (debug) path.

    :param request: the request to retry.
    :param spider: the spider asking for the retry; provides settings, stats
        and logging context.
    :param reason: a string, ``Exception`` instance or ``Exception`` class
        naming why the retry happens; used for retry stats keys.
    :param max_retry_times: retry budget; falls back to the request's
        ``max_retry_times`` meta key, then the ``RETRY_TIMES`` setting.
    :param priority_adjust: priority delta for the retry request; falls back
        to the ``RETRY_PRIORITY_ADJUST`` setting.
    :param logger: logger used for retry / give-up messages.
    :param stats_base_key: base key under which retry stats are recorded.
    :returns: a fresh :class:`~scrapy.Request`, or ``None`` when the retry
        budget is exhausted.
    """
    settings = spider.crawler.settings
    assert spider.crawler.stats
    stats = spider.crawler.stats
    attempt = request.meta.get("retry_times", 0) + 1

    # Resolve the budget: explicit argument > request meta > RETRY_TIMES.
    if max_retry_times is None:
        max_retry_times = request.meta.get("max_retry_times")
    if max_retry_times is None:
        max_retry_times = settings.getint("RETRY_TIMES")

    # Guard clause: budget exhausted -> record, log, give up.
    if attempt > max_retry_times:
        stats.inc_value(f"{stats_base_key}/max_reached")
        logger.error(
            "Gave up retrying %(request)s (failed %(retry_times)d times): %(reason)s",
            {"request": request, "retry_times": attempt, "reason": reason},
            extra={"spider": spider},
        )
        return None

    # Routine reasons (NOT_LOGGER_REASON) are deliberately kept quiet.
    if reason not in NOT_LOGGER_REASON:
        logger.debug(
            "Retrying %(request)s (failed %(retry_times)d times): %(reason)s",
            {"request": request, "retry_times": attempt, "reason": reason},
            extra={"spider": spider},
        )

    retry_request: Request = request.copy()
    retry_request.meta["retry_times"] = attempt
    retry_request.dont_filter = True
    if priority_adjust is None:
        priority_adjust = settings.getint("RETRY_PRIORITY_ADJUST")
    retry_request.priority = request.priority + priority_adjust

    # Normalise the reason to a plain string for the stats key: exception
    # classes are instantiated, instances collapse to their dotted name.
    if callable(reason):
        reason = reason()
    if isinstance(reason, Exception):
        reason = global_object_name(reason.__class__)

    stats.inc_value(f"{stats_base_key}/count")
    stats.inc_value(f"{stats_base_key}/reason_count/{reason}")
    return retry_request


class CompanyIpByApiMiddleware(RetryMiddleware):
    """Downloader middleware that attaches company proxies (fetched from an
    internal HTTP API via ``get_company_ip_crawler_by_api``) to outgoing
    requests and rotates the proxy when the download fails with a
    proxy/connection error.

    Proxy-retry bookkeeping lives in ``request.meta["proxies_retry_times"]``,
    separate from Scrapy's own ``retry_times`` counter.
    """

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either return a Response object, return a Request object,
        # or raise IgnoreRequest.
        old_proxy = request.meta.get("proxies_info")

        if not old_proxy:
            # The request went out without a proxy attached: schedule a retry
            # with a freshly fetched one, falling back to the response when
            # the proxy-retry budget is exhausted.
            response = self.set_new_proxy(request, "init_proxies", spider) or response

        return response

    def _get_new_proxy(self):
        """Fetch a fresh proxy URL (the ``"http"`` entry) from the company API."""
        return get_company_ip_crawler_by_api().get("http")

    def set_new_proxy(self, request, exception, spider):
        """Attach a newly fetched proxy to *request* and build a retry request.

        Mutates ``request.meta`` in place (``proxy``, ``http_proxy``,
        ``proxies_info`` and the ``proxies_retry_times`` counter) and returns
        the retry ``Request`` produced by :meth:`_retry`, or ``None`` once
        ``proxies_retry_times`` reaches ``self.max_retry_times``.
        """
        retry_key = "proxies_retry_times"
        retry_times = request.meta.get(retry_key, 0)
        if retry_times >= self.max_retry_times:
            return None

        old_proxy = request.meta.get("proxies_info")
        request.meta[retry_key] = retry_times + 1
        new_proxy = self._get_new_proxy()
        if str(exception) not in NOT_LOGGER_REASON:
            # getattr with a default: callables without __name__ (e.g.
            # functools.partial) must not crash the error-logging path.
            callback_name = (
                getattr(request.callback, "__name__", None)
                or request.callback
                or "parse"
            )
            msg = """
                        -------------- %s.%s [%s] error -------------
                        old proxy      %s
                        new proxy      %s
                        error          %s
                        deal request   %s
                        retry_times    %s/%s
                        """
            spider.logger.error(
                msg
                % (
                    spider.__class__.__name__,
                    callback_name,
                    self.__class__.__name__,
                    old_proxy,
                    new_proxy,
                    str(exception),
                    request,
                    retry_times,
                    self.max_retry_times,
                )
            )
        request.meta["proxy"] = new_proxy
        request.meta["http_proxy"] = new_proxy
        request.meta["proxies_info"] = new_proxy  # original proxy info
        return self._retry(request, exception, spider)

    def process_exception(self, request, exception, spider):
        """Rotate the proxy for retryable network/proxy errors.

        Unhandled exception types are re-raised so later middlewares and
        errbacks still see them.
        """
        if isinstance(exception, self.exceptions_to_retry) and not request.meta.get(
            "dont_retry", False
        ):
            return self.set_new_proxy(request, exception, spider)
        # twisted.web._newclient is imported at module level so this
        # attribute chain resolves even when Scrapy has not already imported
        # it as a side effect.
        if isinstance(
            exception,
            (
                twisted.web._newclient.ResponseNeverReceived,
                requests.exceptions.ProxyError,
                requests.exceptions.ReadTimeout,
                requests.exceptions.ConnectionError,
            ),
        ):
            return self.set_new_proxy(request, exception, spider)
        raise exception

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader middleware.
        # Must either return None, a Response, a Request, or raise
        # IgnoreRequest.
        #
        # set_new_proxy mutates request.meta in place; the retry copy it
        # returns is intentionally discarded so the original request proceeds
        # with the proxy attached.
        self.set_new_proxy(request, 'init_proxies', spider)
        return None

    def _retry(self, request, reason, spider):
        """Build the retry request via this module's ``get_retry_request``
        (which suppresses logging for reasons in ``NOT_LOGGER_REASON``)."""
        max_retry_times = request.meta.get("max_retry_times", self.max_retry_times)
        priority_adjust = request.meta.get("priority_adjust", self.priority_adjust)
        return get_retry_request(
            request,
            reason=reason,
            spider=spider,
            max_retry_times=max_retry_times,
            priority_adjust=priority_adjust,
        )
