# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import re
import uuid

from faker import Faker
from scrapy import signals
from scrapy.exceptions import IgnoreRequest

from apps.jobs.jobs.spiders import CustomException
from components.middlewares.downloadmiddlewares.public.company_ip_by_api import CompanyIpByApiMiddleware
from components.middlewares.downloadmiddlewares.public.useragent_random import RandomUserAgentMiddleware
from utils.proxies_tools import get_company_ip_crawler_by_api


# useful for handling different item types with a single interface


class AcwScV2:
    """Solver for the Aliyun WAF ``acw_sc__v2`` anti-bot cookie.

    The challenge page embeds a 40-character hex string ``arg1``; the cookie
    value is produced by permuting ``arg1`` with a fixed position box and then
    XOR-ing the result byte-wise against a static hex key.
    """

    # 1-based source positions of the fixed permutation used by get_unsbox().
    # Taken verbatim from the obfuscated JavaScript on the challenge page.
    _BOX = (
        0xF, 0x23, 0x1D, 0x18, 0x21, 0x10, 0x1, 0x26, 0xA, 0x9,
        0x13, 0x1F, 0x28, 0x1B, 0x16, 0x17, 0x19, 0xD, 0x6, 0xB,
        0x27, 0x12, 0x14, 0x8, 0xE, 0x15, 0x20, 0x1A, 0x2, 0x1E,
        0x7, 0x4, 0x11, 0x5, 0x3, 0x1C, 0x22, 0x25, 0xC, 0x24,
    )

    def __init__(self, response):
        # ``response`` only needs a ``.text`` attribute (the challenge HTML).
        self.response = response

    def get_acw_sc_v2(self, arg1, static_str="3000176000856006061501533003690027800375"):
        """Return the cookie value derived from ``arg1``.

        The permuted string and ``static_str`` are consumed two hex digits
        (one byte) at a time; each byte pair is XOR-ed and re-emitted as a
        zero-padded lowercase hex pair.

        :param arg1: hex string scraped from the challenge page (40 chars).
        :param static_str: fixed XOR key embedded in the challenge script.
        :return: lowercase hex cookie value.
        """
        shuffled = self.get_unsbox(arg1)
        pairs = []
        for i in range(0, len(shuffled), 2):
            byte = int(shuffled[i:i + 2], 16) ^ int(static_str[i:i + 2], 16)
            # "02x" reproduces the original manual zero-padding to two digits.
            pairs.append(format(byte, "02x"))
        return "".join(pairs)

    @staticmethod
    def get_unsbox(arg1):
        """Return ``arg1`` reordered according to the fixed permutation box."""
        return "".join(arg1[pos - 1] for pos in AcwScV2._BOX)

    def get_cookies(self, cookie=None):
        """Extract the ``acw_sc__v2`` cookie from the challenge response.

        :param cookie: optional dict to update in place (a fresh dict is
            created when omitted, avoiding the mutable-default pitfall).
        :return: ``cookie``, with ``acw_sc__v2`` added when the response is
            a challenge page; unchanged otherwise.
        """
        if cookie is None:
            cookie = {}
        response_text = self.response.text
        if 'setCookie("acw_sc__v2"' in response_text:
            match = re.search(r"arg1='(.*?)'", response_text)
            if match:
                cookie.update({"acw_sc__v2": self.get_acw_sc_v2(match.group(1))})
        return cookie


class JobsSpiderMiddleware:
    """Pass-through spider middleware (Scrapy template).

    Methods that are left undefined are treated by Scrapy as if the
    middleware does not modify the objects passing through it.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and hook the
        # spider_opened signal for logging.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Runs for each response entering the spider; returning None
        # lets it continue unchanged.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward whatever requests/items the spider produced, untouched.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special handling: defer to other middlewares / Scrapy defaults.
        return None

    def process_start_requests(self, start_requests, spider):
        # Like process_spider_output, but for the seed requests
        # (no response is associated; must yield only requests).
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)

class JobsDownloaderMiddleware(CompanyIpByApiMiddleware, RandomUserAgentMiddleware):
    """Downloader middleware combining proxy rotation with per-site
    anti-bot handling for the job spiders.

    Proxy management is inherited from ``CompanyIpByApiMiddleware``;
    ``RandomUserAgentMiddleware`` is also a base, though ``process_request``
    below overrides the UA with a fresh Faker-generated one per request.
    """

    @staticmethod
    def _fresh_request(request):
        """Return a copy of ``request`` with all cookie state removed,
        so the retry starts from a clean session."""
        new_request = request.copy()
        new_request.headers.pop("Cookie", None)
        new_request.cookies = {}
        return new_request

    def _acw_retry(self, request, response, spider):
        """Solve the acw_sc__v2 challenge embedded in ``response`` and
        schedule a retry carrying the computed cookie.

        Returns whatever ``self._retry`` returns (a new request, or a
        falsy value when retries are exhausted).
        """
        cookie = AcwScV2(response).get_cookies()
        new_request = request.copy()
        new_request.cookies.update(cookie)
        # Mark the retry so process_request skips re-assigning a proxy.
        new_request.meta["acw_sc__v2"] = True
        return self._retry(new_request, "acw_sc__v2", spider)

    def process_request(self, request, spider):
        """Attach a random User-Agent, assign a proxy (unless this is an
        acw_sc__v2 cookie retry), and add liepin API headers when needed.

        Returns None so Scrapy continues processing the request.
        """
        request.headers.update({"User-Agent": Faker().user_agent()})

        # acw_sc__v2 retries must keep the proxy that solved the challenge.
        if not request.meta.get('acw_sc__v2'):
            CompanyIpByApiMiddleware.process_request(self, request, spider)

        if spider.name == 'liepin_jobs' and 'api' in request.url:
            request.headers.update({
                "Origin": "https://www.liepin.com",
                "Referer": "https://www.liepin.com/",
                "X-Client-Type": "web",
                "X-Fscp-Std-Info": '{"client_id": "40108"}',
                "X-Fscp-Trace-Id": str(uuid.uuid4()),
                "X-Fscp-Version": "1.1",
                "X-Requested-With": "XMLHttpRequest",
            })
        return None

    def process_response(self, request, response, spider):
        """Run per-spider anti-bot checks on the downloaded response.

        Returns a retry request (cookie-free, or carrying a solved
        acw_sc__v2 cookie, or with a fresh proxy) when a block page is
        detected; otherwise returns ``response`` unchanged.
        """
        # NOTE(review): the parent's return value is discarded here, so only
        # its side effects are kept; confirm it never returns a replacement
        # request/response that should be propagated.
        CompanyIpByApiMiddleware.process_response(self, request, response, spider)
        if spider.name == 'boss_jobs':
            if "禁止访问" in response.text:
                new_request = self._fresh_request(request)
                return self._retry(new_request, '您的 IP 存在异常访问行为，暂时被禁止访问！', spider) or response
            elif response.status == 302:
                # Redirect means the proxy is burned: rotate and retry clean.
                return self.set_new_proxy(self._fresh_request(request), "init_proxies", spider)
        elif spider.name == 'liepin_jobs':
            if "Location" in response.headers:
                new_request = self._fresh_request(request)
                return self._retry(new_request, 'IP不可用，跳转到未知页面', spider) or response
        elif spider.name == 'qcwy_jobs':
            if "acw_sc__v2" in response.text:
                return self._acw_retry(request, response, spider) or response
            elif "滑动验证页面" in response.text:
                # Slider captcha page: current proxy is flagged, rotate it.
                return self.set_new_proxy(self._fresh_request(request), "init_proxies", spider)
            elif "网络超时，请稍后重试！" in response.text:
                return self.set_new_proxy(self._fresh_request(request), "init_proxies", spider)
            elif response.status == 400:
                spider.logger.error(f"{request.url} 400 {response.text}")
        elif spider.name == 'zhilian_jobs':
            if """setCookie("acw_sc__v2", x)""" in response.text:
                return self._acw_retry(request, response, spider) or response
            elif "滑动验证页面" in response.text:
                return self.set_new_proxy(self._fresh_request(request), "init_proxies", spider)
            elif response.status == 302:
                return self.set_new_proxy(self._fresh_request(request), "init_proxies", spider)
            elif response.status == 400:
                spider.logger.error(f"{request.url} 400 {response.text}")
        return response

    def process_exception(self, request, exception, spider):
        """Rotate the proxy on CustomException; otherwise defer to the
        parent's exception handling, dropping the request once the parent
        signals the retry limit has been reached."""
        if isinstance(exception, CustomException):
            return self.set_new_proxy(request, exception, spider)
        try:
            new_request = CompanyIpByApiMiddleware.process_exception(self, request, exception, spider)
            if new_request:
                return new_request
        except Exception as e:
            # Chain the original failure for easier post-mortem debugging.
            raise IgnoreRequest(f"已经重试最大次数 丢弃 {e}") from e

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)
