#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: xaoyaoyao
@contact: xaoyaoyao@aliyun.com
@file: selenium_ware.py
@time: 2018/08/25
"""
import time
import random
import logging
from scrapy import signals
from scrapy.http.response.html import HtmlResponse
from article_spider.utils.tools import Tools
from article_spider.settings import AJAX_DOMAINS, IFRAME_DOMAINS, AJAX_MAX_PAGE_SIZE


class SeleniumDownloadMiddleware(object):
    """Downloader middleware that renders JS-heavy / iframe pages via Selenium.

    For URLs whose domain appears in ``AJAX_DOMAINS`` or ``IFRAME_DOMAINS``
    the page is loaded through ``spider.browser`` (a Selenium WebDriver the
    spider is expected to own) and returned as an ``HtmlResponse``, bypassing
    Scrapy's default HTTP downloader.  Every other request falls through
    (``process_request`` returns ``None``) and is downloaded normally.
    """

    def process_request(self, request, spider):
        """Render the request in Selenium when its domain requires it.

        :param request: the Scrapy ``Request`` being downloaded.
        :param spider: the running spider; must expose ``browser`` (a Selenium
            WebDriver) and, for iframe domains, ``iframe`` (frame name/id).
        :return: an ``HtmlResponse`` built from the rendered page, or ``None``
            to let Scrapy download the request through its normal handler.
        """
        url = request.url
        logging.info('SeleniumDownloadMiddleware url >> %s', url)
        domain = Tools.domain(url)
        if domain is None:
            # Unparseable URL: hand it back to the default downloader.
            return None
        if domain in AJAX_DOMAINS:
            spider.browser.get(url)
            # Click through any "load more" pagination before capturing.
            self.handle_more(url, spider)
            time.sleep(random.uniform(2, 3))  # let the last AJAX batch settle
            return HtmlResponse(url=spider.browser.current_url,
                                body=spider.browser.page_source,
                                request=request, encoding='utf-8')
        if domain in IFRAME_DOMAINS:
            spider.browser.get(url)
            # The content lives inside an iframe; switch into it and snapshot
            # the HTML *before* any further interaction mutates the page.
            spider.browser.switch_to.frame(spider.iframe)
            html = spider.browser.page_source
            self.handle_more(url, spider)
            time.sleep(random.uniform(2, 3))
            return HtmlResponse(url=spider.browser.current_url, body=html,
                                request=request, encoding='utf-8')
        # Any other domain: fall back to Scrapy's normal download path.
        return None

    def handle_more(self, url, spider):
        """Expand "load more" style pagination for known Zhihu pages.

        Best-effort: any Selenium error (missing element, intercepted click,
        ...) is logged and swallowed so the page is still returned as-is.

        :param url: the URL currently loaded in ``spider.browser``.
        :param spider: the spider owning the Selenium ``browser``.
        """
        url = str(url)
        if url.startswith('https://www.zhihu.com/api/v4/articles/'):
            # Plain JSON API endpoint -- nothing to expand.
            return
        if url == 'https://www.zhihu.com/explore/recommendations':
            index = 0
            try:
                # Keep clicking the "more" button; Selenium's find_element_*
                # raises NoSuchElementException once the button disappears,
                # which is the normal loop exit (handled below).
                while True:
                    index += 1
                    more = spider.browser.find_element_by_css_selector(
                        "a.zg-btn-white.zu-button-more")
                    more.click()
                    time.sleep(random.uniform(1, 2))
                    if index > AJAX_MAX_PAGE_SIZE:
                        # Safety cap so a sticky button can't loop forever.
                        break
            except Exception as e:
                logging.error("[ZhihuSpider] error. The msg %s", str(e))
        elif url.startswith('https://www.zhihu.com/question/'):
            try:
                # BUG FIX: find_element_* never returns a falsy value -- it
                # raises when the element is absent -- so the original
                # unguarded call could crash the whole request.  Treat a
                # missing button as "nothing to do" instead.
                more = spider.browser.find_element_by_css_selector(
                    "a.QuestionMainAction")
                more.click()
                time.sleep(random.uniform(1, 3))
            except Exception as e:
                logging.error("[ZhihuSpider] error. The msg %s", str(e))

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware and wire up signals.

        :param crawler: the running ``Crawler``; its signal manager is used
            to get a ``spider_opened`` notification.
        :return: a connected middleware instance.
        """
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def spider_opened(self, spider):
        # Signal callback; log message typo fixed (was 'pider_opened').
        logging.info('spider_opened')
