# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

from scrapy.http import HtmlResponse  # used to build replacement responses
from time import sleep
#from selenium.webdriver.chrome.options import Options





# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter

class WangyiproDownloaderMiddleware:
    """Downloader middleware that swaps in Selenium-rendered responses.

    The spider is expected to expose two attributes:

    * ``spider.modol_list`` -- the list of section-page URLs whose
      content is loaded dynamically (JavaScript) and therefore cannot be
      scraped from the plain downloaded HTML.
    * ``spider.bro`` -- a shared Selenium WebDriver instance created by
      the spider.

    For requests whose URL is in ``modol_list`` the original response is
    replaced with a new :class:`HtmlResponse` built from the browser's
    rendered page source; all other responses pass through unchanged.
    """

    def process_request(self, request, spider):
        """Called for each outgoing request.

        Returning ``None`` tells Scrapy to continue processing the
        request normally; this middleware only intervenes on responses.
        """
        return None

    def process_response(self, request, response, spider):
        """Intercept and tamper with the five section-page responses.

        If ``request.url`` is one of the dynamically loaded section
        pages, re-fetch it with the shared Selenium browser, scroll to
        the bottom twice so lazily loaded news items are pulled in, and
        return a fresh ``HtmlResponse`` wrapping the rendered source.
        Otherwise return the original ``response`` untouched.
        """
        if request.url in spider.modol_list:
            bro = spider.bro  # shared Selenium WebDriver owned by the spider
            bro.implicitly_wait(10)
            bro.get(request.url)  # load the section page in the real browser
            sleep(1)
            # Scroll to the bottom twice so lazily loaded items are fetched.
            bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            sleep(1)
            bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')

            # page_source now includes the dynamically loaded news data.
            page_texts = bro.page_source
            sleep(1)

            # Build a replacement response object carrying the rendered
            # HTML so downstream parsing sees the dynamic content.
            return HtmlResponse(url=request.url, body=page_texts,
                                encoding='utf-8', request=request)
        # Any other URL: hand the original response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        """Called when downloading or process_request raises.

        Returning ``None`` lets Scrapy's remaining exception-processing
        chain handle it.
        """
        pass
