"""Download handlers for http and https schemes"""

import logging
from time import time
from urllib.parse import urldefrag

from scrapy.core.downloader.handlers.http11 import HTTP11DownloadHandler
from scrapy.http import HtmlResponse
from twisted.internet import threads
from twisted.internet.error import TimeoutError
from twisted.web.client import ResponseFailed
from apps.patent.patent.spiders.patent_normal_user import PatentNormalUserPool
from utils.rs_utils.rs6 import Rs6Session

logger = logging.getLogger(__name__)


class RS6DownloadHandler(HTTP11DownloadHandler):
    """Download handler that performs requests through a blocking
    ``Rs6Session`` in a reactor thread-pool thread instead of Twisted's
    HTTP/1.1 client.

    When the ``USE_RS_USER_KEY`` setting is truthy, per-request identity
    (proxy + User-Agent) is drawn from a shared ``PatentNormalUserPool``,
    which is also exposed on the crawler as ``crawler.user_pool``.
    """

    # Value of the USE_RS_USER_KEY setting; populated in from_crawler().
    use_rs_user_key = None
    # Shared identity pool (or None) — set once per process in from_crawler().
    user_pool = None
    # Fallback download timeout in seconds when request.meta carries none.
    DEFAULT_TIMEOUT = 10

    def __init__(self, settings, crawler=None):
        super().__init__(settings, crawler)
        # Propagate verbose Rs6Session logging only at DEBUG level.
        self.debug = settings.get("RS_LOG_LEVEL") == "DEBUG"

    @classmethod
    def from_crawler(cls, crawler):
        cls.use_rs_user_key = crawler.settings.get("USE_RS_USER_KEY", None)
        if cls.use_rs_user_key:
            cls.user_pool = PatentNormalUserPool()
            # Expose the pool so spiders/middlewares can reach it directly.
            crawler.user_pool = cls.user_pool
        else:
            cls.user_pool = None
        return cls(crawler.settings, crawler)

    def download_request(self, request, spider):
        from twisted.internet import reactor

        timeout = request.meta.get("download_timeout") or self.DEFAULT_TIMEOUT
        # Drop the fragment for the timeout message, matching Scrapy's handlers.
        url = urldefrag(request.url)[0]
        start_time = time()
        # Run the blocking Rs6Session request off the reactor thread.
        d = threads.deferToThread(self._async_download, request)
        # Record download latency once the worker thread returns.
        d.addCallback(self._cb_latency, request, start_time)
        # BUG FIX: the timeout DelayedCall used to be stored on
        # ``self._timeout_cl``, which concurrent downloads clobbered —
        # one request's completion could cancel (or fail to cancel)
        # another request's timer. Keep it per-request instead.
        timeout_cl = reactor.callLater(timeout, d.cancel)
        d.addBoth(self._cb_timeout, timeout_cl, url, timeout)
        return d

    def _async_download(self, request):
        """Perform the blocking HTTP request; runs in a thread-pool thread.

        Returns an ``HtmlResponse`` on success; raises ``ResponseFailed``
        wrapping the underlying exception on any failure (after evicting
        the pooled user whose identity was in use, if any).
        """
        try:
            # Start from a clean cookie jar so prior requests don't leak
            # state. NOTE(review): this clears a freshly constructed
            # Rs6Session's jar — presumably Rs6Session shares one
            # underlying session; confirm against its implementation.
            Rs6Session(debug=self.debug).session.cookies.clear()
            headers = dict(request.headers.to_unicode_dict())
            # BUG FIX: meta may lack "download_timeout"; int(None) raised
            # TypeError here. Use the same fallback as download_request().
            timeout = int(
                request.meta.get("download_timeout") or self.DEFAULT_TIMEOUT
            )
            if self.user_pool:
                user = self.user_pool.get_user()
                proxies = user.proxies
                headers.update({"User-Agent": user.user_agent})
                # Remember which pooled user served this request so it can
                # be evicted on failure.
                request.meta["user_id"] = user.user_id
            else:
                # Fall back to an explicit proxy from request meta, if any.
                proxy = request.meta.get("proxies_info") or request.meta.get(
                    "proxies"
                )
                proxies = {"http": proxy, "https": proxy} if proxy else None
            response = Rs6Session(debug=self.debug).request_core(
                proxies=proxies,
                url=request.url,
                method=request.method,
                headers=headers,
                data=request.body,
                cookies=request.cookies,
                verify=False,
                timeout=timeout,
            )
            return HtmlResponse(
                url=request.url,
                status=response.status_code,
                body=response.content,
                encoding="utf-8",
                request=request,
            )
        except Exception as e:
            # Evict the identity that just failed so it is not reused.
            if self.user_pool and request.meta.get("user_id"):
                self.user_pool.del_user(request.meta["user_id"])
                self._crawler.spider.logger.warning(
                    f"del user: {request.meta['user_id']} because {e}"
                )
            # Chain the cause so the original traceback is preserved.
            raise ResponseFailed(e) from e

    def _cb_timeout(self, result, timeout_cl, url, timeout):
        """Cancel the pending timer on completion, or raise on expiry.

        If the DelayedCall is still active the download finished in time:
        cancel the timer and pass the result through. Otherwise the timer
        already fired (and cancelled the deferred), so surface a
        ``TimeoutError``.
        """
        if timeout_cl.active():
            timeout_cl.cancel()
            return result
        raise TimeoutError(f"Getting {url} took longer than {timeout} seconds.")

    def _cb_latency(self, result, request, start_time):
        # Mirror Scrapy's built-in download_latency bookkeeping.
        request.meta["download_latency"] = time() - start_time
        return result
