# -*- coding: utf-8 -*-
__author__ = 'lztkdr'
__date__ = '2019/7/26 14:27'

from scrapy.http.response.html import HtmlResponse
from scrapy.http import Request
import json,time,datetime

class GuaZiScrapyMiddleware(object):
    """
    Downloader middleware for the Guazi (瓜子网) used-car spider.

    Depending on ``spider.mode`` it either lets Scrapy download pages
    normally (mode 1: keyword search via a pre-built URL) or drives a
    Selenium browser and hands the rendered page back to the spider as
    an ``HtmlResponse`` (any other mode).
    """

    def _selenium_response(self, request, spider):
        """Wrap the Selenium driver's currently loaded page in an HtmlResponse.

        Returning a Response from process_request short-circuits the
        download: the remaining downloader middlewares are skipped and the
        response goes straight to the spider's callback.
        """
        return HtmlResponse(
            url=spider.driver.current_url,
            body=spider.driver.page_source,
            request=request,
            encoding=request.encoding,
        )

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader middleware.
        #
        # Must either:
        # - return None: continue processing this request
        # - return a Response object: use it instead of downloading
        # - return a Request object: re-schedule the new request
        # - raise IgnoreRequest: process_exception() methods of installed
        #   downloader middlewares will be called

        if request.meta:
            print(f"进入{spider.name}  {request.url} process_request：{ json.dumps(request.meta) }")

        # request.meta carries the meta dict passed via yield Request(...)
        # in the spider's start_requests.
        if spider.name != "guazi":
            return None

        # Renamed from `type` so the builtin is not shadowed.
        req_type = request.meta.get("type", None)

        if req_type == "search_keyword":
            keyword = request.meta.get("keyword", None)
            if spider.mode == 1:
                # URL-based keyword search.
                # NOTE(review): `keyword` is not interpolated here — presumably
                # spider.search_keyword_url is already formatted with it;
                # confirm against the spider implementation.
                return Request(
                    spider.search_keyword_url,
                    headers=request.headers,
                    callback=spider.parse_search_keyword,
                )
            # Selenium search: type the keyword into the search box and click.
            time.sleep(2)
            spider.driver.find_element_by_xpath("//input[@name='keyword']").send_keys(keyword)
            time.sleep(1)
            spider.driver.find_element_by_xpath("//button[@class='search-btn']").click()
            time.sleep(1)
            return self._selenium_response(request, spider)

        if req_type in ("nextpage", "get_detail"):
            if spider.mode == 1:
                # URL mode: let Scrapy download the page normally.
                return None
            # "nextpage" carries the target URL in meta; "get_detail" uses
            # the request's own URL.
            url = request.meta.get("url", None) if req_type == "nextpage" else request.url
            spider.driver.get(url)
            time.sleep(2)
            return self._selenium_response(request, spider)

        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        #
        # Must either:
        # - return a Response object
        # - return a Request object
        # - raise IgnoreRequest

        if response.status != 200:
            curr_time = datetime.datetime.now()
            last_time = spider.last_update_time
            # Fix: timedelta.seconds silently drops whole days from the
            # delta; total_seconds() is correct. int() keeps the printed
            # value an integer, matching the original output format.
            lave_secounds = int((curr_time - last_time).total_seconds())
            print(f"{response.url}, response.status:{response.status},{curr_time} - {last_time},剩余：{lave_secounds} 秒后更换 Proxy/UserAgent")
            # Re-schedule the same request so it is retried.
            # NOTE(review): without request.dont_filter=True the dupe filter
            # may drop this retry — confirm the spider sets it.
            return request
        return response

    def process_exception(self, request, exception, spider):
        # Log the exception and retry by returning the request to the scheduler.
        print(f"process_exception:{exception}")
        return request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)