# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import logging
import random

import time
from scrapy import signals, Item
from scrapy.http import HtmlResponse, Request
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.support.wait import WebDriverWait


class TimelineFilterSpiderMiddleware(object):
    """Spider middleware that drops items whose ``issue_date`` is older
    than a configured cut-off date.

    The cut-off comes from the ``TIMELINEFILTER_DATETIME`` setting, either
    a ``%Y-%m-%d`` string or an already-parsed ``time.struct_time``.
    Requests, and items whose ``issue_date`` is not a ``struct_time``,
    always pass through unfiltered.
    """

    @classmethod
    def from_settings(cls, settings):
        """Build the middleware from crawler settings.

        Parses ``TIMELINEFILTER_DATETIME`` to a ``time.struct_time`` when
        it is given as a string; any non-string value is used as-is.
        """
        latest_issue_date = settings['TIMELINEFILTER_DATETIME']
        if isinstance(latest_issue_date, str):
            latest_issue_date = time.strptime(latest_issue_date, '%Y-%m-%d')
        return cls(latest_issue_date)

    def __init__(self, latest_issue_date):
        # Cut-off date; struct_time instances compare chronologically
        # with >=, which is what process_spider_output relies on.
        self.latest_issue_date = latest_issue_date

    def process_spider_output(self, response, result, spider):
        """Yield requests untouched; filter out items dated before the cut-off."""
        for x in result:
            if isinstance(x, Request):
                yield x  # bypass all the requests
            else:
                item = x
                issue_date = item['issue_date']
                # Filtering is effective only when issue_date was parsed
                # into a struct_time; anything else cannot be compared
                # reliably, so it is passed through.
                if isinstance(issue_date, time.struct_time):
                    if issue_date >= self.latest_issue_date:
                        yield x
                    else:
                        # Lazy %-args: the message is only formatted when
                        # INFO logging is actually enabled.
                        logging.info(
                            "doc %s issue_date %s, is filtered since the issue_date is expired",
                            item['title'], item['issue_date'])
                else:
                    yield x


class RandomUserAgentMiddleware(object):
    """Downloader middleware that sets a random ``User-Agent`` header on
    every outgoing request, picked from the ``USER_AGENTS`` setting."""

    def __init__(self, agents):
        # Pool of user-agent strings to choose from on each request.
        self.agents = agents

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware from the crawler."""
        return cls(cls._get_user_agents(crawler))

    @classmethod
    def _get_user_agents(cls, crawler):
        # Extension point: subclasses override this to read a different
        # settings key (see WechatRandomUserAgentMiddleware).
        return crawler.settings.getlist('USER_AGENTS')

    def process_request(self, request, spider):
        # Overwrite (rather than setdefault) so every request gets a
        # freshly rotated user-agent.
        request.headers['User-Agent'] = random.choice(self.agents)
        # Lazy %-args avoid formatting the message when INFO is disabled.
        logging.info("request to %s...", request)
        logging.info("request headers are %s...", request.headers)


class WechatRandomUserAgentMiddleware(RandomUserAgentMiddleware):
    """User-agent rotator variant whose pool comes from the
    WeChat-specific ``USER_AGENTS_WECHAT`` setting instead of
    ``USER_AGENTS``."""

    @classmethod
    def _get_user_agents(cls, crawler):
        # Only the settings key differs from the base class.
        return crawler.settings.getlist('USER_AGENTS_WECHAT')


class JavaScriptMiddleware(object):
    """Downloader middleware that renders pages with a Selenium-driven
    browser (headless Chrome, Firefox, or PhantomJS) when the
    ``SELENIUM_REQUEST_ENABLED`` setting is truthy.

    Subclasses implement :meth:`_processing_response` to turn the rendered
    page source into a response object; :meth:`_before_request` is an
    optional hook run just before the page is fetched.
    """

    def __init__(self):
        # Requests handled so far; the webdriver (and with it the
        # user-agent) is re-created every 50 requests.
        self.count = 0

    def _init_selenium_driver(self, user_agent, engine_name):
        """Create ``self.driver`` for the configured ``SELENIUM_ENGINE``.

        Unrecognized engine names fall back to PhantomJS.
        """
        if engine_name == "CHROME":
            return self._init_chrome_driver(user_agent)
        elif engine_name == "FIREFOX":
            return self._init_firefox_driver(user_agent)
        else:
            return self._init_phantomjs_driver(user_agent)

    def _init_chrome_driver(self, user_agent):
        # NOTE(review): user_agent is ignored here — Chrome keeps its
        # default UA; only PhantomJS applies it. Confirm if intended.
        logging.info("chrome (headless mode) is starting. required chrome version above 59...")
        from selenium.webdriver.chrome.options import Options
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        # chrome_options= is the Selenium 3 keyword (renamed options= in 4).
        self.driver = webdriver.Chrome(chrome_options=chrome_options)
        self.driver.set_page_load_timeout(60)

    def _init_firefox_driver(self, user_agent):
        # NOTE(review): user_agent is ignored here as well.
        logging.info("firefox is starting...")
        self.driver = webdriver.Firefox()
        self.driver.set_page_load_timeout(60)

    def _init_phantomjs_driver(self, user_agent):
        logging.info("phantomJs is starting...")
        # Mutates the shared PHANTOMJS capability dict, which
        # webdriver.PhantomJS() picks up as its defaults.
        caps = DesiredCapabilities.PHANTOMJS
        caps["phantomjs.page.settings.resourceTimeout"] = 60000  # timeout after 60s
        caps["phantomjs.page.settings.loadImages"] = False  # skip images: much faster crawling
        caps["phantomjs.page.settings.userAgent"] = user_agent
        self.driver = webdriver.PhantomJS()
        self.driver.set_page_load_timeout(60)

    def _processing_response(self, request, response, spider):
        """Subclass hook: build a response from the rendered page source."""
        raise NotImplementedError

    def _before_request(self, request, spider, driver):
        """Optional subclass hook invoked just before fetching the page."""
        pass

    def process_request(self, request, spider):
        """Render the request with Selenium when enabled; otherwise return
        None so Scrapy's normal downloader handles it."""
        if spider.settings['SELENIUM_REQUEST_ENABLED']:
            # Every 50 requests, rebuild the webdriver using the random
            # user-agent currently on the request, so the UA rotates too.
            if self.count % 50 == 0:
                logging.info("change a random user-agent to %s...",
                             request.headers['User-Agent'])
                self._init_selenium_driver(request.headers['User-Agent'],
                                           spider.settings['SELENIUM_ENGINE'])
            self.count += 1
            self._before_request(request, spider, self.driver)
            logging.debug("before selenium method to get url...")
            self.driver.get(request.url)
            logging.debug("after selenium method to get url...")
            # Give asynchronous JS a moment to finish rendering.
            time.sleep(1)
            response = self.driver.page_source
            return self._processing_response(request, response, spider)


class JavaScriptListPageMiddleware(JavaScriptMiddleware):
    """JavaScriptMiddleware variant for list (index) pages.

    When a start URL has been rendered, the parsed detail-page links are
    attached to the response as ``url_set``.  Any other URL returns None,
    so Scrapy downloads it through the normal pipeline.
    """

    def _processing_response(self, request, response, spider):
        if request.url in spider.start_urls:
            logging.debug("parsing list urls...")
            # _parse_list is supplied by the concrete spider-specific
            # subclass; it extracts the detail URLs from the page source.
            url_set = self._parse_list(request, response, spider)

            # Build the response here so the already-rendered page is not
            # sent back to the downloader.  The original re-checked the
            # start_urls condition here, but inside this branch it is
            # always true, so the redundant test was removed.
            html_response = HtmlResponse(self.driver.current_url, body=response,
                                         encoding='utf-8', request=request)
            html_response.url_set = url_set
            return html_response

class ScrapySinahealthSpiderMiddleware(object):
    """Scrapy's stock spider-middleware template: every hook passes its
    input through unchanged, and spider start-up is logged.

    Scrapy treats an undefined hook as a no-op, so these explicit
    pass-through implementations exist only to keep the extension points
    visible for future customization.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also subscribes to the spider_opened
        # signal so start-up gets logged.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every incoming response; returning None lets it through.
        return None

    def process_spider_output(self, response, result, spider):
        # Re-emit everything the spider produced, untouched.
        for produced in result:
            yield produced

    def process_spider_exception(self, response, exception, spider):
        # No special handling; returning None defers to other middleware.
        return None

    def process_start_requests(self, start_requests, spider):
        # Start requests pass through unchanged (must yield only requests).
        for start_request in start_requests:
            yield start_request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
