"""
重试中间件
1.状态码不对的，肯定需要重试
408,429，500,502,503,504，522,524
2.哪些状态码不需要重试
200-300 ， 403,404
3.请求期间报错的，我们需要收集这些错误，当捕获到这些异常时，就需要去进行重试
"""
from asyncio.exceptions import TimeoutError
from aiohttp import ClientTimeout, ClientConnectorSSLError, ClientResponseError, \
    ClientConnectionError
from aiohttp.client_exceptions import ClientPayloadError, ClientConnectorError
from anyio import EndOfStream
from httpcore import ReadError
from httpx import RemoteProtocolError, ConnectError, ReadTimeout
from requests.exceptions import RetryError

from bald_spider.stats_collector import StatsCollector
from bald_spider.utils.log import get_logger

# Transport-level errors (from aiohttp, httpx, httpcore, anyio, asyncio) that
# are treated as transient: a download failing with any of these is retried.
# Retry merges this list with the user-configured RETRY_EXCEPTIONS setting.
_retry_exceptions = [
    ClientConnectorError,
    # NOTE(review): aiohttp.ClientTimeout is a timeout *configuration* class,
    # not an exception, so isinstance() will never match it here — presumably
    # an aiohttp timeout exception (e.g. ServerTimeoutError) was intended.
    # Harmless but dead weight; confirm and replace.
    ClientTimeout,
    ClientConnectorSSLError,
    ClientResponseError,
    RemoteProtocolError,
    ReadError,
    ConnectError,
    EndOfStream,
    TimeoutError,
    ClientPayloadError,
    ReadTimeout,
    ClientConnectionError

]


class Retry:
    """Downloader middleware that retries failed requests.

    A request is retried when either:

    * its response status is listed in ``retry_http_codes`` (and not in
      ``ignore_http_codes``), or
    * downloading it raised one of the configured retry exceptions.

    Each retry increments ``request.meta['retry_times']``; once that counter
    reaches ``max_retry_times`` the request is given up.
    """

    def __init__(
            self,
            *,
            retry_http_codes,
            ignore_http_codes,
            max_retry_times,
            retry_exceptions,
            retry_priority,
            stats: StatsCollector
    ):
        self.retry_http_codes = retry_http_codes
        self.ignore_http_codes = ignore_http_codes
        self.max_retry_times = max_retry_times
        # Merge user-configured exceptions with the built-in transport errors.
        # isinstance() in process_exception requires a tuple, not a list.
        self.retry_exceptions = tuple(retry_exceptions + _retry_exceptions)
        self.stats = stats
        self.retry_priority = retry_priority
        self.logger = get_logger(self.__class__.__name__)

    @classmethod
    def create_instance(cls, crawler):
        """Build a Retry middleware instance from the crawler's settings."""
        return cls(
            retry_http_codes=crawler.settings.getlist("RETRY_HTTP_CODES"),
            ignore_http_codes=crawler.settings.getlist("IGNORE_HTTP_CODES"),
            max_retry_times=crawler.settings.getint("MAX_RETRY_TIMES"),
            retry_exceptions=crawler.settings.getlist("RETRY_EXCEPTIONS"),
            retry_priority=crawler.settings.getint("RETRY_PRIORITY"),
            stats=crawler.stats
        )

    def process_exception(self, request, exception, spider):
        """Retry a request whose download raised a known transient error.

        Returns the rescheduled request, or ``None`` (implicitly) to let the
        framework's default exception handling take over.
        """
        if (
                isinstance(exception, self.retry_exceptions)
                and not request.meta.get('dont_retry', False)
        ):
            return self._retry(request, type(exception).__name__, spider)

    def process_response(self, request, response, spider):
        """Retry responses with a retryable status code; pass others through."""
        if request.meta.get('dont_retry', False):
            return response
        elif response.status in self.ignore_http_codes:
            return response
        elif response.status in self.retry_http_codes:
            reason = f"response code: {response.status}"
            # _retry returns None when retries are exhausted; fall back to the
            # response so downstream callbacks still receive it.
            return self._retry(request, reason, spider) or response
        return response

    def _retry(self, request, reason, spider):
        """Reschedule *request* if it has retry attempts left.

        Returns the mutated request to be re-enqueued, or ``None`` once the
        retry budget of ``max_retry_times`` is exhausted.
        """
        retry_times = request.meta.get('retry_times', 0)
        if retry_times < self.max_retry_times:
            retry_times += 1
            self.logger.info(f"{spider} {request} {reason} retrying {retry_times} times...")
            request.meta['retry_times'] = retry_times
            # BUGFIX: was False. A retried request is one the scheduler has
            # already seen, so it must bypass duplicate filtering or it would
            # be dropped instead of re-downloaded.
            request.dont_filter = True
            request.priority = request.priority + self.retry_priority
            self.stats.inc_value("retry_count")
            return request
        else:
            self.logger.warning(f"{spider} {request} {reason} retry max {self.max_retry_times} times, give up...")
            return None
