# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

import time

from scrapy import signals
from scrapy.http.response.html import HtmlResponse
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait


class JinashuDownloaderMiddleware(object):
    """Downloader middleware that renders pages with a real Chrome browser.

    Intercepts each request, loads the URL via Selenium, repeatedly clicks
    the page's "load more" element until it disappears, and returns the
    fully rendered HTML to the spider as an ``HtmlResponse``.
    """

    # XPath of the "load more" / next-page element on the target site.
    NEXT_BTN_XPATH = "//div[@role='main']/div[position()=1]/section[last()]/div[position()=1]/div"

    def __init__(self):
        self.driver = webdriver.Chrome()

    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and hook spider_closed so Chrome is quit."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def spider_closed(self, spider):
        # Quit the browser when the crawl ends; otherwise every run
        # leaks a chromedriver + Chrome process.
        self.driver.quit()

    def process_request(self, request, spider):
        """Fetch ``request.url`` with Selenium and return the rendered page.

        Returning an ``HtmlResponse`` here short-circuits Scrapy's normal
        download handler, so the spider parses the JS-rendered HTML.
        """
        # Intercept the request and fetch it with Selenium instead.
        self.driver.get(request.url)

        # Wait (up to 10s) for the "load more" button to become clickable
        # before entering the click loop.
        WebDriverWait(self.driver, 10).until(
            EC.element_to_be_clickable((By.XPATH, self.NEXT_BTN_XPATH))
        )

        # Keep clicking "load more" until the element disappears. The JS
        # click avoids interception by overlays that a native .click()
        # would trip over.
        while True:
            try:
                # find_element(By.XPATH, ...) works on Selenium 3 and 4;
                # find_element_by_xpath was removed in Selenium 4.
                next_btn = self.driver.find_element(By.XPATH, self.NEXT_BTN_XPATH)
            except NoSuchElementException:
                break  # button gone -> all content has been loaded
            self.driver.execute_script('arguments[0].click();', next_btn)
            time.sleep(0.5)  # brief pause so the page can append content

        # Wrap the rendered HTML in a response object for the spider.
        return HtmlResponse(
            request.url,
            body=self.driver.page_source,
            request=request,
            encoding='utf-8',
        )

