# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import re

import requests
from faker import Faker
from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
from scrapy.exceptions import IgnoreRequest

from apps.creadit_grade_a.creadit_grade_a.middlewares import HunanEncrypt
from components.middlewares.downloadmiddlewares.private.rs_6_middlewares import Rs6RetryMiddleware
from components.middlewares.downloadmiddlewares.public.company_ip_by_api import CompanyIpByApiMiddleware
from utils.tools import parse_url_params, unquote_url


class TaxIllegalSpiderMiddleware:
    """Spider middleware for the tax-illegal spiders.

    Every hook is a pure pass-through: Scrapy treats a missing hook as
    "do not modify", so this class currently only logs spider startup.
    It is kept as an extension point for future per-spider filtering.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Standard Scrapy factory: build the middleware and subscribe to spider_opened."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Accept every response unchanged (None means: continue processing)."""
        return None

    def process_spider_output(self, response, result, spider):
        """Forward the spider's yielded requests/items untouched."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Let exceptions propagate to the other spider middlewares (returns None)."""
        return None

    def process_start_requests(self, start_requests, spider):
        """Forward the start requests untouched (must yield requests only)."""
        yield from start_requests

    def spider_opened(self, spider):
        # Signal handler wired up in from_crawler.
        spider.logger.info("Spider opened: %s" % spider.name)


class TaxIllegalDownloaderMiddleware(Rs6RetryMiddleware, CompanyIpByApiMiddleware):
    """Downloader middleware for the tax-illegal spiders.

    Combines the Rs6 anti-bot retry logic with API-based company proxy
    rotation, plus per-spider request shaping:

    * ``shanghai_illegal`` / ``shanxi_illegal`` — strip the proxy and pin a
      fixed desktop User-Agent.
    * ``hunan_illegal`` — pre-flight the site with a ``requests`` session to
      solve the HTTP 412 cookie challenge (via ``HunanEncrypt``) and refresh
      the ``_csrf`` token embedded in the form body.
    """

    def __init__(self, settings):
        # Initialise both bases explicitly: they both accept the same
        # `settings` argument, so cooperative super() is not used here.
        CompanyIpByApiMiddleware.__init__(self, settings)
        Rs6RetryMiddleware.__init__(self, settings)
        # Shared session used for the hunan cookie-challenge pre-flight.
        self.session = requests.Session()
        # One Faker instance reused for every request: constructing Faker
        # per request is expensive (it reloads locale providers each time).
        self.faker = Faker()

    @classmethod
    def from_crawler(cls, crawler):
        """Standard Scrapy factory: build from crawler settings and hook spider_opened."""
        s = cls(settings=crawler.settings)
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        """Shape the outgoing request; always returns None (continue the chain).

        Rotates the User-Agent, lets CompanyIpByApiMiddleware attach a proxy,
        then applies the per-spider special cases described in the class
        docstring.
        """
        # The body is a form-encoded query string; recover its parameters so
        # the hunan branch can patch the `_csrf` field in place.
        _root_url, params = parse_url_params(unquote_url(request.body.decode()))

        request.headers.update({"User-Agent": self.faker.user_agent()})
        CompanyIpByApiMiddleware.process_request(self, request, spider)
        headers = request.headers.to_unicode_dict()
        # Mirror the Scrapy proxy info into a requests-style proxies dict for
        # the pre-flight session below.
        proxies = {
            "https": request.meta.get("proxies_info"),
            "http": request.meta.get("proxies_info"),
        } if request.meta.get("proxies_info") else {}

        if spider.name in ['shanghai_illegal', 'shanxi_illegal']:
            # These sites reject the rotated proxies: go direct with a fixed,
            # realistic desktop UA instead.
            request.meta["proxy"] = None
            request.meta["http_proxy"] = None
            request.meta["proxies_info"] = None
            request.headers.update({"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"})

        if spider.name == 'hunan_illegal':
            # Solve the anti-bot cookie challenge: the site answers 412 until
            # the MM_mq4qQammP3BA3 cookie is transformed by HunanEncrypt.
            self.session.cookies.clear_session_cookies()
            url = "https://hunan.chinatax.gov.cn/index"
            response = self.session.request(url=url, method='GET', headers=headers, proxies=proxies, timeout=5)
            cookies = {}
            # Up to 5 attempts; each 412 hands back a fresh seed cookie.
            for _ in range(5):
                if response.status_code in [412]:
                    first_cookie = (cookies or response.cookies or self.session.cookies.get_dict()).get("MM_mq4qQammP3BA3")
                    cookies = {**self.session.cookies.get_dict(), **response.cookies.get_dict()}
                    cookies.update(HunanEncrypt().run(str(headers['User-Agent']), first_cookie))
                    self.session.cookies.update(cookies)
                    response = self.session.request(url=url, method='GET', headers=headers, proxies=proxies, timeout=5)
                    cookies = {**self.session.cookies.get_dict(), **response.cookies.get_dict()}
                else:
                    break
            # Propagate the solved cookies onto the Scrapy request.
            request.headers.update({"Cookie": '; '.join([f"{k}={v}" for k, v in cookies.items() if v])})
            request.cookies.update(cookies)
            # Refresh the CSRF token from the index page meta tag, if present.
            csrf = re.findall('"_csrf" content="(.*?)"', response.text)
            if '_csrf' in params and csrf:
                params.update({"_csrf": csrf[0]})
                request._set_body('&'.join(f"{k}={v}" for k, v in params.items()).encode("utf-8"))

        return None

    def process_response(self, request, response, spider):
        """Retry challenged responses; raise IgnoreRequest once retries are spent.

        For hunan, 412/404 means the cookie challenge expired: re-run
        process_request to re-solve it, then schedule a retry. Everything
        else is delegated to Rs6RetryMiddleware.
        """
        if spider.name == 'hunan_illegal':
            if response.status in [412, 404]:
                retry_times = request.meta.get('retry_times', 0)
                self.process_request(request, spider)
                request.meta['retry_times'] = retry_times + 1
                retried = self._retry(request, 'retry', spider)
                if retried:
                    return retried
                # _retry returns None when max retries are exceeded; returning
                # None from process_response is invalid in Scrapy, so drop the
                # request explicitly (same policy as the Rs6 path below).
                raise IgnoreRequest("已经重试最大次数 丢弃")
        response = Rs6RetryMiddleware.process_response(self, request, response, spider)
        if response:
            return response
        raise IgnoreRequest("已经重试最大次数 丢弃")

    def process_exception(self, request, exception, spider):
        """Leave download exceptions to the other middlewares (returns None)."""
        pass

    def spider_opened(self, spider):
        # Signal handler wired up in from_crawler.
        spider.logger.info("Spider opened: %s" % spider.name)
