# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
import random
from selenium import webdriver
from scrapy.http import HtmlResponse
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from pangolin.common.ip_common import IpCommon
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy.exceptions import NotConfigured
from pangolin.common.db_operateCommon import DbOperateClass
from pangolin.common.db_operateCommon import *
from pangolin.settings import IS_ADD_PROXY_IP
import os,sys
import logging
from datetime import *
import time
from PIL import Image



class PangolinSpiderMiddleware(object):
    """Spider middleware that passes everything through unchanged while
    logging the traffic and the spider's open/close lifecycle.

    Methods that are not defined are treated by Scrapy as if the
    middleware did not modify the passed objects.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy: build the middleware and subscribe it
        # to the spider lifecycle signals.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def process_spider_input(self, response, spider):
        """Log each response entering the spider; returning None lets
        processing continue normally."""
        spider.logger.debug("#### 33333 response %s , spider %s ####" % (response, spider))
        return None

    def process_spider_output(self, response, result, spider):
        """Forward every Request/dict/Item produced by the spider,
        unmodified."""
        spider.logger.debug("#### 44444 response %s , result %s , spider %s ####" % (response, result, spider))
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Log spider/process_spider_input exceptions; the implicit None
        return leaves handling to other middlewares."""
        spider.logger.debug("spider_exception: %s####%s" % (spider.name, exception.__str__()))

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests, logging each one.
        Must yield only requests (not items)."""
        spider.logger.debug("#### 2222222 start_requests %s , spider %s ####" % (start_requests, spider))
        for request in start_requests:
            spider.logger.debug("#### one_request %s , spider %s ####" % (request, spider))
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    def spider_closed(self, spider, reason):
        spider.logger.info('spider_closed: %s ,reason:%s' % (spider.name, reason))


# Rotate the request User-Agent
class RotateUserAgentMiddleware(UserAgentMiddleware):
    """Downloader middleware that attaches a randomly chosen User-Agent
    header to every outgoing request.

    BUGFIX: the original ``user_agent_list`` was missing a comma after the
    first entry, so Python's implicit string-literal concatenation fused
    the first two strings into a single bogus User-Agent value.
    """

    def __init__(self, user_agent=''):
        # NOTE(review): the parent __init__ is deliberately not called,
        # matching the original behaviour (the parent reads crawler
        # settings this subclass does not use).
        self.user_agent = user_agent

    def process_request(self, request, spider):
        """Pick a random UA from the pool and set it on the request
        (setdefault keeps an explicitly-set header intact)."""
        ua = random.choice(self.user_agent_list)
        if ua:
            logging.info('User-Agent:%s**************************************' % ua)
            request.headers.setdefault('User-Agent', ua)
            # the default user_agent_list composes chrome,I E,firefox,Mozilla,opera,netscape

    # for more user agent strings,you can find it in http://www.useragentstring.com/pages/useragentstring.php
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    ]


# Attach a proxy to each request
class ProxyMiddleware(object):
    """Downloader middleware that attaches a reachable proxy to each request.

    The proxy pools (``ip_list`` / ``ip_list_https``) are populated once,
    at class-definition (import) time, from the ``cpa_ip_list`` table when
    ``IS_ADD_PROXY_IP == 'TRUE'``.

    Changes vs. original: removed a dead no-op statement
    (``proxy_ip.split('//')[0].replace(...).strip()`` whose result was
    discarded — strings are immutable) and flattened the redundant
    ``while/else`` and ``elif ip_type is None`` branches. Behaviour is
    unchanged.
    """

    def process_request(self, request, spider):
        """Pick random proxies until a usable one is found, then attach it
        either to the plain scrapy request meta or to the splash args.

        NOTE(review): if no proxy in the pool is ever usable this loops
        forever — consider a retry cap.
        """
        proxy_ip, is_usable = self.is_usable(request.url)
        while not is_usable:
            logging.info("%s 不可用" % proxy_ip)
            proxy_ip, is_usable = self.is_usable(request.url)
        if hasattr(spider, 'splash_request') is False:
            logging.info("#### scrapy_proxy_ip is %s ####" % proxy_ip)
            request.meta['proxy'] = proxy_ip
        else:
            logging.info("#### splash_proxy_ip is %s ####" % proxy_ip)
            request.meta['splash']['args']['proxy'] = proxy_ip

    def is_usable(self, url):
        """Pick a random proxy whose scheme matches *url* and probe it.

        Returns a ``(proxy_url, reachable_flag)`` tuple; ``proxy_url`` is
        of the form ``"http://1.2.3.4:8080"``.
        """
        if url.find("https") > -1:
            proxy_ip = random.choice(self.ip_list_https)
        else:
            proxy_ip = random.choice(self.ip_list)
        ip = proxy_ip.split('//')[1].split(':')[0]
        port = proxy_ip.split(':')[2]
        is_usable = IpCommon.ip_is_usable(ip, port)
        return proxy_ip, is_usable

    # Static fallback pools; live proxies are appended from the DB below.
    # ip from http://pachong.org/ ; http://www.xicidaili.com/nn
    ip_list = [
        # "http://122.193.14.110:80",
    ]
    ip_list_https = [
        # "https://58.253.70.149:8080",
    ]

    # Populate the pools from the database. This runs at import time,
    # which is a deliberate (if heavyweight) one-shot load.
    if IS_ADD_PROXY_IP == 'TRUE':
        db_operate = DbOperateClass()
        sql = "SELECT ip,ip_port,ip_type FROM cpa_ip_list WHERE available = '1'"
        (fc, count) = db_operate.select_by_sql(sql)
        for row in fc:
            ip = row[0]
            ip_port = row[1]
            ip_type = row[2]

            # Default to http when the type column is NULL or not https.
            if ip_type is not None and ip_type.lower().find("https") > -1:
                ip_types = "https"
            else:
                ip_types = "http"
            ip = ip_types + "://" + ip + ":" + ip_port

            if ip.find("https") > -1:
                ip_list_https.append(ip)
            else:
                ip_list.append(ip)


# Dynamic page rendering (PhantomJS)
class JavaScriptMiddleware(object):
    """Downloader middleware that renders JS-heavy pages with PhantomJS
    for spiders flagged as needing dynamic rendering.

    BUGFIX: the original ``return HtmlResponse(...)`` came *before* the
    driver-cleanup code, making it unreachable — every rendered request
    leaked a PhantomJS process. The dead cleanup also referenced ``signal``
    which was never imported (only ``scrapy.signals`` is). The driver is
    now shut down in a ``finally`` block.
    """

    def process_request(self, request, spider):
        # Spiders not flagged for dynamic rendering fall through to the
        # normal downloader (implicit None return).
        if not DbOperateClass.is_dynamic_rendering(spider.name):
            return None

        logging.info("request.meta: %s " % request.meta)
        logging.info("%s PhantomJS is starting..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        # Reuse the proxy already assigned by ProxyMiddleware.
        proxy = request.meta['proxy']
        service_args = [
            '--proxy=' + proxy,
            # '--proxy-type=socks5',
            # '--proxy-auth=username:password',
            '--load-images=no',  # skip images to speed up rendering
        ]
        dcap = dict(DesiredCapabilities.PHANTOMJS)
        # Forge the UA and Referer so the rendered request matches scrapy's.
        dcap["phantomjs.page.settings.userAgent"] = (request.headers['User-Agent'])
        dcap["phantomjs.page.customHeaders.Referer"] = request.url
        driver = webdriver.PhantomJS(desired_capabilities=dcap, service_args=service_args)
        try:
            logging.info("%s PhantomJS is getUrl..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            driver.set_page_load_timeout(100)
            driver.set_script_timeout(100)
            driver.get(request.url)
            logging.info("%s PhantomJS getUrl completed..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            body = driver.page_source
            # Returning an HtmlResponse short-circuits the download chain.
            return HtmlResponse(driver.current_url, body=body, encoding='utf-8', request=request)
        finally:
            try:
                # SIGTERM first so the phantomjs child exits even when
                # quit() hangs; best-effort, errors are only logged.
                import signal as _signal
                driver.service.process.send_signal(_signal.SIGTERM)
                driver.quit()
            except Exception as e:
                logging.error("JavaScriptMiddleware_driver.quit_failed %s" % e)


# Retry handling
class RetryMiddleware(RetryMiddleware):
    """Customized retry middleware: configured from the standard RETRY_*
    settings, but download exceptions are only logged (no retry here)."""

    def __init__(self, settings):
        if not settings.getbool('RETRY_ENABLED'):
            raise NotConfigured
        self.max_retry_times = settings.getint('RETRY_TIMES')
        self.retry_http_codes = {int(code) for code in settings.getlist('RETRY_HTTP_CODES')}
        self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')

    def process_exception(self, request, exception, spider):
        """Log the failure; returning None hands the exception on to the
        remaining middlewares instead of scheduling a retry."""
        spider.logger.debug("#### process_exception %s , spider %s ####" % (exception.__str__(), spider))
        return None

    # def _retry(self):
    #     return None