# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

import time

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver


class SeleniumMiddleware:
    """
    Downloader middleware that renders selected pages with Selenium.

    Requests whose URL ends with "baidu.com" are fetched by driving a real
    Firefox browser; every other request is returned to Scrapy's default
    downloader (by returning None), which keeps the crawl fast.
    """

    def __init__(self):
        # Lazily created Selenium driver; stays None until a matching
        # request actually needs the browser.
        self.driver = None

    def process_request(self, request, spider):
        """
        Called for every outgoing request before it is downloaded, so we can
        pick which pages need browser rendering and skip the rest.

        :param request: the scrapy Request about to be downloaded
        :param spider: the spider that issued the request
        :return: an HtmlResponse rendered by Selenium for matching URLs,
                 or None to let Scrapy download the request itself.
        """
        print("SeleniumMiddleware ---> process_request")
        print("SeleniumMiddleware----->" + request.url)
        # Only URLs ending in "baidu.com" are rendered through the browser.
        if str(request.url).endswith("baidu.com"):
            # 1. Start a Firefox instance via Selenium.
            # 2. Navigate to the requested URL.
            # 3. Wait for the page to finish loading, then grab the rendered
            #    HTML (page_source).
            # 4. Hand the HTML back to Scrapy as an HtmlResponse so the
            #    spider's parse() callback receives it unchanged.
            self.driver = webdriver.Firefox()
            print("selenium处理请求----->" + request.url)
            self.driver.get(request.url)
            # BUGFIX: `time` was used here without being imported (NameError).
            # Crude fixed wait; a WebDriverWait on a concrete element would
            # be more robust.
            time.sleep(3)
            content = self.driver.page_source
            # BUGFIX: quit() shuts down the browser AND the driver process;
            # close() only closed the window and leaked the geckodriver.
            self.driver.quit()
            # Return the rendered page to the spider for parsing.
            return HtmlResponse(url=request.url, body=content, request=request, encoding='utf-8')
        else:
            print("无需selenium处理----->" + request.url)
            # Returning None tells Scrapy to download this request normally.
            return None

    def process_response(self, request, response, spider):
        """
        Called for every response coming back from the downloader.

        No intervention is needed, so the response is returned unmodified.
        Must return a Response, a Request, or raise IgnoreRequest.
        """
        print("process_response -->" + request.url)
        print("SeleniumMiddleware ---> process_response")
        return response


class SeleniumMiddleWareTest:
    """
    Downloader middleware that fetches EVERY request through a real Edge
    browser driven by Selenium, so the target site sees ordinary browser
    traffic instead of a bare crawler (helps avoid bot detection / IP bans).
    """

    def __init__(self):
        # One shared browser instance reused for the whole crawl.
        self.driver = webdriver.Edge()

    def __del__(self):
        # BUGFIX: quit() tears down the browser and the driver process;
        # close() only closed the current window. Also guard against the
        # driver never having been created (e.g. __init__ raised) so the
        # finalizer cannot itself raise AttributeError.
        driver = getattr(self, "driver", None)
        if driver is not None:
            driver.quit()

    def process_request(self, request, spider):
        """
        Render the request in the shared browser and return the resulting
        HTML as an HtmlResponse, short-circuiting Scrapy's own downloader.

        :param request: the scrapy Request about to be downloaded
        :param spider: the spider that issued the request
        :return: HtmlResponse containing the browser-rendered page source
        """
        self.driver.get(url=request.url)
        content = self.driver.page_source
        return HtmlResponse(url=request.url, body=content, request=request, encoding='utf-8')


class ProxyMiddleWare:
    """
    Downloader middleware that routes every request through a fixed HTTP proxy.
    """

    def process_request(self, request, spider):
        """
        Runs for each outgoing request before it is downloaded; only takes
        effect when this middleware is enabled in the project settings.
        Attaching the address under ``request.meta['proxy']`` makes Scrapy's
        downloader send the request through that proxy.
        """
        request.meta['proxy'] = "http://122.9.101.6:8888"

class ProjectdemoSpiderMiddleware:
    """Default spider-middleware scaffold.

    Scrapy treats any undefined hook as a pass-through, so each method here
    simply forwards its input unchanged; the only extra behavior is a log
    line when a spider opens.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy to build the middleware; hooking in here
        # lets us subscribe to crawler signals.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Runs for each response entering the spider. Returning None lets
        # processing continue; raising an exception would abort it.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward the spider's yielded requests/items untouched.
        # Must yield Request or item objects.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special handling: returning None leaves the exception to
        # Scrapy's default flow.
        return None

    def process_start_requests(self, start_requests, spider):
        # Like process_spider_output, but for the spider's start requests
        # (there is no response at this point). Must yield only requests.
        yield from start_requests

    def spider_opened(self, spider):
        # Signal handler wired up in from_crawler.
        spider.logger.info('Spider opened: %s' % spider.name)


class ProjectdemoDownloaderMiddleware:
    """Default downloader-middleware scaffold.

    Every hook is an explicit pass-through (Scrapy would behave the same if
    the hooks were simply absent); the only extra behavior is a log line
    when a spider opens.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy to build the middleware; lets us hook the
        # spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None means "keep processing this request normally".
        # Other legal results: a Response, a Request, or raising
        # IgnoreRequest (which triggers process_exception in later
        # middlewares).
        return None

    def process_response(self, request, response, spider):
        # Hand the downloader's response on unchanged.
        # Must return a Response or Request, or raise IgnoreRequest.
        return response

    def process_exception(self, request, exception, spider):
        # Returning None lets the remaining middlewares (and Scrapy's
        # defaults) handle the exception; a Response or Request here would
        # stop the process_exception chain.
        return None

    def spider_opened(self, spider):
        # Signal handler wired up in from_crawler.
        spider.logger.info('Spider opened: %s' % spider.name)
