# -*- coding: utf-8 -*-
__author__ = 'lztkdr'
__date__ = '2019/7/26 14:16'

from scrapy import signals

class ProxyMiddleware(object):
    """Scrapy downloader middleware that routes requests through a proxy.

    The proxy is applied only when the spider opts in via three attributes:
    ``enabled_proxy`` (truthy), ``mode`` (must equal 1 for this middleware)
    and ``proxy_host`` (a non-empty ``host:port`` string).
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal so we can log when a spider starts.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Proxy pitfalls -- see https://www.jianshu.com/p/d679b79f6760
        # getattr() defaults keep this middleware safe for spiders that never
        # declare the proxy attributes (previously raised AttributeError).
        enabled = getattr(spider, 'enabled_proxy', False)
        mode = getattr(spider, 'mode', None)
        proxy_host = getattr(spider, 'proxy_host', None)
        if enabled and mode == 1 and proxy_host:
            # The proxy URL scheme mirrors the request scheme.
            # NOTE(review): many HTTP proxies expect "http://host:port" even
            # for https requests -- confirm the proxy actually accepts a TLS
            # connection before relying on the "https://" form.
            if request.url.startswith("http://"):
                request.meta['proxy'] = "http://" + proxy_host   # http proxy
            elif request.url.startswith("https://"):
                request.meta['proxy'] = "https://" + proxy_host  # https proxy
        # Returning None lets Scrapy continue processing this request.
        return None

    def process_response(self, request, response, spider):
        # Pass the response through unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Use the spider's logger (consistent with spider_opened) instead of
        # print(), so the message honours the project's logging configuration.
        # Lazy %-args avoid formatting when the level is filtered out.
        spider.logger.error("出现异常：%s", exception)

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)