# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import logging
import random
import time
from typing import Optional

import ddddocr
from faker import Faker
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
from retrying import retry
from scrapy import signals

from components.middlewares.downloadmiddlewares.private.rs_6_middlewares import Rs6RetryMiddleware
from components.middlewares.downloadmiddlewares.public.company_ip_by_api import CompanyIpByApiMiddleware
from utils.proxies_tools import get_company_ip_crawler_by_api
from utils.user_pool import GuestUserPool, GuestUser


class BlackListSpiderMiddleware:
    """Pass-through spider middleware (standard Scrapy template).

    Every hook either returns the default value or yields its input
    unchanged; Scrapy treats undefined hooks the same way, so this class
    exists mainly as a customization point.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Instantiate the middleware and subscribe to ``spider_opened``."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Accept every response entering the spider (return None = continue)."""
        return None

    def process_spider_output(self, response, result, spider):
        """Forward each item/request produced by the spider unchanged."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Do not handle spider exceptions; let other middlewares decide."""
        return None

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests unchanged (requests only)."""
        yield from start_requests

    def spider_opened(self, spider):
        """Log the spider name when it opens."""
        spider.logger.info("Spider opened: %s" % spider.name)


class BlackListDownloaderMiddleware(Rs6RetryMiddleware, CompanyIpByApiMiddleware, GuestUserPool):
    """Downloader middleware for creditchina.gov.cn "black list" crawling.

    Combines three behaviours from its bases:
    - ``Rs6RetryMiddleware``: handles the site's "rs6" anti-bot cookie scheme;
    - ``CompanyIpByApiMiddleware``: attaches company proxies fetched via API;
    - ``GuestUserPool``: a Redis-backed pool of pre-verified guest sessions
      (user agent + proxy + cookies), replenished via :meth:`login`.
    """

    def __init__(self, settings):
        # The bases take different constructor arguments, so each one is
        # initialised explicitly rather than via a single super() chain.
        CompanyIpByApiMiddleware.__init__(self, settings)
        Rs6RetryMiddleware.__init__(self, settings)
        GuestUserPool.__init__(self, redis_key="black_list", min_users=10, redis_kwargs={"url": settings.get("REDIS_URL")})

    def login(self) -> Optional[GuestUser]:
        """Build a fresh guest session and solve the site's image captcha.

        Resets the shared ``requests`` session (cookies, proxy, headers),
        pings the info endpoint, then fetches and OCR-solves the captcha.

        Returns:
            A ``GuestUser`` carrying the new identity (user agent, proxies,
            cookies) on success.

        Raises:
            Exception: with the first 100 chars of the response body when the
                info request or the captcha verification is rejected.
        """
        self.session.cookies.clear_session_cookies()
        self.session.proxies = get_company_ip_crawler_by_api()
        user_agent = Faker().user_agent()
        self.session.headers = {
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Origin": "https://www.creditchina.gov.cn",
            "Pragma": "no-cache",
            "Referer": "https://www.creditchina.gov.cn/",
            "User-Agent": user_agent,
        }
        info_url = 'https://public.creditchina.gov.cn/private-api/info'
        response = self._request(method="GET", url=info_url, retry_times=0)
        if response.status_code != 200:
            raise Exception(response.text[:100])
        # The site expects a client-generated "cookienum" cookie before it
        # serves the captcha image.
        self.session.cookies.update({"cookienum": str(random.random())})
        verify_url = f"https://public.creditchina.gov.cn/private-api/verify/getVerify?_v={random.random()}"
        img = self._request(url=verify_url, method="GET")
        # Solve the captcha image locally with ddddocr.
        captcha_text = ddddocr.DdddOcr(show_ad=False).classification(img.content)
        check_url = "https://public.creditchina.gov.cn/private-api/verify/checkVerify"
        response = self._request(url=check_url, data={"verifyInput": captcha_text}, method="POST")
        if response.status_code != 200:
            raise Exception(response.text[:100])
        # user_id is a millisecond timestamp — unique enough for pool keys.
        return GuestUser(user_id=int(time.time() * 1000), user_agent=user_agent, proxies=self.session.proxies, cookies=self.session.cookies.get_dict())

    @retry(stop_max_attempt_number=5)
    def _request(self, **kwargs):
        """Issue a request via the shared session, handling the rs6 challenge.

        Accepts ``method`` and ``url`` plus any whitelisted ``requests``
        keyword argument. A 202/412 response is treated as the anti-bot
        cookie challenge: a fresh "rs" cookie is generated and an exception
        is raised so the ``@retry`` decorator re-issues the request with it.

        Returns:
            The ``requests`` response, or ``None`` when ``retry_times`` >= 5
            (NOTE(review): callers accessing ``.status_code`` would crash on
            None — kept for backward compatibility).
        """
        allowed_request_kwargs = (
            "params", "data", "headers", "cookies", "files", "auth", "timeout",
            "allow_redirects", "proxies", "hooks", "stream", "verify", "cert", "json",
        )
        if (kwargs.get("retry_times") or 0) >= 5:
            logging.getLogger(__name__).warning("retry times is too many")
            return None
        request_kwargs = {k: v for k, v in kwargs.items() if k in allowed_request_kwargs}
        # BUGFIX: default the timeout instead of always passing timeout=10
        # alongside **request_kwargs — the original raised
        # "got multiple values for keyword argument 'timeout'" whenever a
        # caller supplied an explicit timeout.
        request_kwargs.setdefault("timeout", 10)
        response = self.session.request(kwargs.get('method'), kwargs.get('url'), **request_kwargs)
        if response.status_code in (202, 412):
            rs_cookie = self.gen_new_cookie(response, base_cookie=self.session.cookies.get_dict())
            self.session.cookies.update(rs_cookie)
            raise Exception("rs_cookie")
        return response

    def process_request(self, request, spider):
        """Attach a pooled guest identity (proxy, UA, cookies) to the request.

        Falls back to a random user agent plus the company-IP middleware when
        the pool has no user available. Always returns None so Scrapy keeps
        processing the request.
        """
        self.run()  # replenish the guest-user pool if it fell below min_users
        user = self.get_user()
        if not user:
            request.headers.update({"User-Agent": Faker().user_agent()})
            CompanyIpByApiMiddleware.process_request(self, request, spider)
            return None
        new_proxy = user.proxies.get("http")
        request.meta["user_id"] = user.user_id
        request.meta["proxy"] = new_proxy
        request.meta["http_proxy"] = new_proxy
        request.meta["proxies_info"] = new_proxy  # raw proxy info
        request.headers.update({"User-Agent": user.user_agent})
        request.headers.update({"Cookie": '; '.join(f"{k}={v}" for k, v in user.cookies.items())})
        return None

    def process_response(self, request, response, spider):
        """Retry on server-error/ban statuses, evicting the burned identity."""
        if response.status in (500, 400, 403):
            if 'user_id' in request.meta:
                # The pooled identity is likely blocked; drop it from the pool.
                self.del_user(request.meta["user_id"])
            # BUGFIX: the retry reason previously always read
            # "status_code is 500" even for 400/403 responses.
            return self._retry(request, f"status_code is {response.status}", spider)
        # Delegate rs6 cookie-challenge detection to the base middleware.
        return Rs6RetryMiddleware.process_response(self, request, response, spider)

    def process_exception(self, request, exception, spider):
        """Drop the failing guest identity and retry via the proxy middleware."""
        if 'user_id' in request.meta:
            self.del_user(request.meta["user_id"])
        retry_request = CompanyIpByApiMiddleware.process_exception(self, request, exception, spider)
        # BUGFIX: the base middleware may return None (give up); the original
        # then crashed inside process_request(None, ...).
        if retry_request is None:
            return None
        # Re-attach a (possibly new) identity/proxy before re-scheduling.
        self.process_request(retry_request, spider)
        return retry_request

    def spider_opened(self, spider):
        """Log the spider name and warm the guest-user pool at startup."""
        spider.logger.info("Spider opened: %s" % spider.name)
        self.run()
