#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
import base64
import logging
import random

from scrapy import signals

from base.base import agents

class TutorialSpiderMiddleware(object):
    """Spider middleware template.

    Any hook left undefined is treated by Scrapy as a no-op, i.e. the
    middleware passes the corresponding objects through untouched.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy to build the middleware; also subscribes
        # to the spider_opened signal so we can log when crawling starts.
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Runs for each response on its way into the spider.
        # Returning None lets processing continue; raising an exception
        # would abort it.
        return None

    def process_spider_output(self, response, result, spider):
        # Runs on whatever the spider produced for this response.
        # Must yield an iterable of Request, dict or Item objects.
        for produced in result:
            yield produced

    def process_spider_exception(self, response, exception, spider):
        # Invoked when the spider (or an earlier middleware's
        # process_spider_input) raises.  Returning None means
        # "not handled here".
        return None

    def process_start_requests(self, start_requests, spider):
        # Like process_spider_output, but for the spider's start requests,
        # which have no associated response.  Must yield only requests.
        for start_request in start_requests:
            yield start_request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

class ProxyMiddleware(object):
    """Downloader middleware that sets a random User-Agent header and a
    database-backed HTTP proxy on every outgoing request.

    Proxy rows are fetched through SQL helpers exposed on the spider
    (``spider.spidersql``, ``spider.cursor``, ``spider.mysql``) — the
    spider is expected to own the database connection.
    """

    def process_request(self, request, spider):
        """Called for every request passing through the downloader middleware.

        Picks a random User-Agent, fetches a proxy ip/port pair from the
        database, records its use time, and stores the proxy URL in
        ``request.meta['proxy']``.

        :param request: the outgoing scrapy Request
        :param spider: the running spider (supplies logger + DB helpers)
        :return: None, so Scrapy continues with the remaining middlewares
                 and finally downloads the request
        """
        # # Use the following lines if your proxy requires authentication
        # proxy_user_pass = "USERNAME:PASSWORD"
        # # setup basic authentication for the proxy
        # # (base64.encodestring is deprecated; use b64encode on bytes)
        # encoded_user_pass = base64.b64encode(proxy_user_pass.encode())
        # request.headers['Proxy-Authorization'] = b'Basic ' + encoded_user_pass
        print ("**********process_request***********")
        self.logger = spider.logger
        self.spidersql = spider.spidersql
        self.cursor = spider.cursor
        self.mysql = spider.mysql
        # random.choice is the idiomatic (and equivalent) form of
        # agents[random.randint(0, len(agents) - 1)]
        request.headers.setdefault("User-Agent", random.choice(agents))
        try:
            self.cursor.execute(self.spidersql.getproxyip())
            data = self.cursor.fetchone()
            if data is None:
                # No proxy rows available: previously this raised a
                # TypeError on data[0] and was mis-reported as a SQL
                # failure.  Send the request out without a proxy instead.
                self.logger.warning("no proxy ip available in database")
                return None
            sql = self.spidersql.updateproxyipusetime(data[0])
            self.cursor.execute(sql)
            self.mysql.db.commit()
        except Exception as exc:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit are no longer swallowed; include the actual
            # error so failures are diagnosable.
            self.logger.error(
                "get proxyip sql and update proxyip execute fail,"
                "please check: %s" % exc)
            self.mysql.db.rollback()
        else:
            # data layout (from getproxyip()): data[0]=row id,
            # data[1]=ip, data[2]=port — TODO confirm against the schema.
            proxy = "http://%s:%s" % (str(data[1]).strip(), str(data[2]).strip())
            request.meta['proxy'] = proxy
            self.logger.info("get proxyip success: %s port: %s" % (data[1], data[2]))

        return None

    def process_response(self, request, response, spider):
        """Called for every downloaded response.

        Returning the response unchanged lets the next middleware in the
        chain (and eventually the spider) process it.

        :return: the response, untouched
        """
        print ("**********process_response***********")
        self.logger.info("get response success")
        return response

    def process_exception(self, request, exception, spider):
        """Called when the download handler or an earlier process_request
        raises (including IgnoreRequest).

        Returning None lets other middlewares (and finally Scrapy's
        default handling) deal with the exception.
        """
        print ("**********process_exception***********")
        self.logger.error('response is null ,maybe proxy failure or please change agent: %s' % exception)
        return None

class SingleMiddlewares(object):
    """Extension that wires Scrapy lifecycle signals to same-named
    callbacks and forwards each event to the running spider.
    """

    def __init__(self):
        # Fallback logger: engine_started / engine_stopped can fire before
        # spider_opened, and previously that crashed with AttributeError
        # because self.logger was only assigned in spider_opened.
        self.logger = logging.getLogger(__name__)

    @classmethod
    def from_crawler(cls, crawler):
        cs = crawler.signals.connect
        s = cls()
        # All connections use the required ``signal=`` keyword; several
        # originally passed ``signals=`` which raises TypeError because
        # SignalManager.connect's ``signal`` argument would be missing.
        cs(s.engine_started, signal=signals.engine_started)
        cs(s.engine_stopped, signal=signals.engine_stopped)
        cs(s.spider_opened, signal=signals.spider_opened)
        cs(s.spider_idle, signal=signals.spider_idle)
        cs(s.spider_closed, signal=signals.spider_closed)
        cs(s.spider_error, signal=signals.spider_error)
        cs(s.request_scheduled, signal=signals.request_scheduled)
        cs(s.request_dropped, signal=signals.request_dropped)
        # NOTE(review): request_received / item_passed are legacy signal
        # names — confirm they exist in the installed Scrapy version.
        cs(s.request_received, signal=signals.request_received)
        cs(s.response_received, signal=signals.response_received)
        cs(s.response_downloaded, signal=signals.response_downloaded)
        cs(s.item_scraped, signal=signals.item_scraped)
        cs(s.item_passed, signal=signals.item_passed)
        cs(s.item_dropped, signal=signals.item_dropped)
        # Scrapy requires from_crawler to return the extension instance;
        # the original returned None, silently disabling the extension.
        return s

    def engine_started(self):
        self.logger.info("HooksasyncExtension, signals.engine_started fired")

    def engine_stopped(self):
        self.logger.info("HooksasyncExtension, signals.engine_stopped fired")

    def item_scraped(self, item, spider, response):
        self.logger.info("HooksasyncExtension, signals.item_scraped fired")
        spider.item_scraped(item, response)

    def item_passed(self, item, spider, output):
        # Intentionally a no-op.
        pass

    def item_dropped(self, item, spider, exception):
        self.logger.info("HooksasyncExtension, signals.item_dropped fired")
        spider.item_dropped(item, exception)

    def spider_closed(self, spider, reason):
        self.logger.info("HooksasyncExtension, signals.spider_closed fired")
        spider.spider_closed(reason)

    def spider_opened(self, spider):
        # Switch to the spider's own logger once the spider is up.
        self.logger = spider.logger
        self.logger.info("HooksasyncExtension, signals.spider_opened fired")
        spider.spider_opened()

    def spider_idle(self, spider):
        self.logger.info("HooksasyncExtension, signals.spider_idle fired")
        spider.spider_idle()

    def spider_error(self, failure, response, spider):
        self.logger.info("HooksasyncExtension, signals.spider_error fired")
        spider.spider_error(failure, response)

    def request_received(self, request, spider):
        # Intentionally a no-op.
        pass

    def request_scheduled(self, request, spider):
        self.logger.info("HooksasyncExtension, signals.request_scheduled fired")
        spider.request_scheduled(request)

    def request_dropped(self, request, spider):
        # Intentionally a no-op.
        pass

    def response_received(self, response, request, spider):
        self.logger.info("HooksasyncExtension, signals.response_received fired")
        spider.response_received(response, request)

    def response_downloaded(self, response, request, spider):
        self.logger.info("HooksasyncExtension, signals.response_downloaded fired")
        spider.response_downloaded(response, request)

