import time

from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver


class JdSpiderMiddleware:
    """Placeholder spider middleware — no processing hooks implemented yet."""


class JdDownloaderMiddleware:
    """Downloader middleware that renders JD book pages with Selenium Chrome.

    Category listing pages (URLs containing ``page``) are fetched through a
    real Chrome browser so JavaScript-rendered content is available, while
    the sort API (``sort?source=bookSort``) only needs extra request headers.
    """

    def __init__(self):
        opt = webdriver.ChromeOptions()
        # opt.add_argument('--headless')  # uncomment to hide the browser;
        # running with a visible window lets you watch the paging behaviour.
        opt.add_argument('user-agent="Mozilla/5.0 (iPod; U; CPU iPhone OS 2_1 like Mac OS X; ja-jp) '
                          'AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5F137 Safari/525.20"')
        # NOTE(review): a bare URL passed to add_argument is not a Chrome
        # option — Chrome receives it as a positional argument and opens it
        # as the start page. Kept as-is for behaviour compatibility; verify
        # this is actually intended.
        opt.add_argument('https://book.jd.com/booksort.html')
        self.driver = webdriver.Chrome(options=opt)

    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and hook it into Scrapy's signal bus.

        Scrapy never calls an arbitrary ``close`` method on a downloader
        middleware, so without this hook the Chrome process would leak.
        Connecting to the ``spider_closed`` signal guarantees
        ``driver.quit()`` runs when the crawl finishes.
        """
        middleware = cls()
        crawler.signals.connect(middleware.spider_closed,
                                signal=signals.spider_closed)
        return middleware

    def process_request(self, request, spider):
        """Render listing pages in Chrome; add headers for the sort API.

        Returns an :class:`HtmlResponse` for listing pages (short-circuiting
        the downloader), otherwise ``None`` so the request proceeds normally.
        """
        if 'page' in request.url:
            # Navigating straight to a sub-category URL fails on JD: the
            # site expects the request to look like a click coming from
            # https://book.jd.com/booksort.html.
            self.driver.get(request.url)
            time.sleep(1)
            # Scroll to the bottom so lazily-loaded items get rendered.
            self.driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            time.sleep(1)
            html = self.driver.page_source
            self.preurl = request.url
            return HtmlResponse(url=request.url, body=html.encode())
        if "sort?source=bookSort" in request.url:
            # This is the JSON endpoint carrying the detailed book data for
            # the big/small categories: fake the JSONP callback name with a
            # millisecond timestamp, the way the real site builds it.
            print("请求接口数据")
            t = int(time.time() * 1000)
            request.headers['callback'] = 'jsonp_{0}_2175'.format(t)
            # The endpoint requires a referer from https://book.jd.com/.
            request.headers['referer'] = 'https://book.jd.com/'
        return None  # let the default download handler take over

    def spider_closed(self, spider):
        """Quit Chrome when the spider finishes (``spider_closed`` signal)."""
        self.driver.quit()

    def close(self, spider):
        # Kept for backward compatibility with any caller that invoked
        # close() directly; delegates to the signal handler.
        self.spider_closed(spider)
