# -*- coding: utf-8 -*-

# Scrapy settings for zhihu_spider project


BOT_NAME = 'zhihu_spider'

SPIDER_MODULES = ['zhihu_spider.spiders']
NEWSPIDER_MODULE = 'zhihu_spider.spiders'

# --- Politeness / concurrency ------------------------------------------
# Fixed delay (seconds) between consecutive downloads.
DOWNLOAD_DELAY = 0.25
# Breadth-first crawl order.
# NOTE(review): SCHEDULER_ORDER and CONCURRENT_REQUESTS_PER_SPIDER are
# legacy Scrapy 0.x settings and are ignored by newer Scrapy releases —
# confirm the Scrapy version this project targets.
SCHEDULER_ORDER = 'BFO'
# Maximum parallel requests for a single spider (legacy setting).
CONCURRENT_REQUESTS_PER_SPIDER = 20
# Maximum number of items processed in parallel by the item pipelines
# (per response).
CONCURRENT_ITEMS = 100
# Global cap on concurrent requests performed by the Scrapy downloader.
CONCURRENT_REQUESTS = 16
# Cap on concurrent requests to a single website.
CONCURRENT_REQUESTS_PER_DOMAIN = 8
# Cap on concurrent requests to a single IP. When non-zero, the per-domain
# cap above is ignored and throttling is applied per IP instead.
CONCURRENT_REQUESTS_PER_IP = 0

# --- Crawl shape -------------------------------------------------------
# Maximum crawl depth; 0 means unlimited.
DEPTH_LIMIT = 0
# Adjust request priority by depth; 0 disables the adjustment.
DEPTH_PRIORITY = 0
# Enable the in-memory DNS cache.
DNSCACHE_ENABLED = True

# --------------- AutoThrottle extension ----------------
# Dynamically adapts the download delay to server load.
AUTOTHROTTLE_ENABLED = True
# Initial download delay, in seconds.
AUTOTHROTTLE_START_DELAY = 3.0
# How many responses should pass between concurrency adjustments.
AUTOTHROTTLE_CONCURRENCY_CHECK_PERIOD = 10

# Downloader middlewares: disable Scrapy's built-in UserAgentMiddleware
# (priority None) and replace it with the project's rotating user-agent
# middleware at priority 400.
# NOTE(review): 'scrapy.contrib.downloadermiddleware.*' is the pre-1.0
# Scrapy module path; newer Scrapy uses
# 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware' — confirm
# the Scrapy version in use before upgrading.
DOWNLOADER_MIDDLEWARES = {
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
    'zhihu_spider.contrib.downloadmiddleware.rotate_useragent.RotateUserAgentMiddleware': 400,
}

# Left empty so RotateUserAgentMiddleware supplies the User-Agent header.
USER_AGENT = ''

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED=True
# HTTPCACHE_EXPIRATION_SECS=0
# HTTPCACHE_DIR='httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES=[]
# HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'

# Log file path, relative to the directory the crawler is started from.
# NOTE(review): presumably the 'logs/' directory must already exist —
# confirm Scrapy does not create it.
LOG_FILE = "logs/scrapy.log"

# ------------ scrapy-redis distributed-crawling settings -----------------
# Replace Scrapy's default scheduler with the scrapy-redis scheduler, which
# feeds spiders from a request queue kept in Redis.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"

# Persist scheduler state: do not flush the Redis queues on close, so the
# crawl can be paused and resumed.
SCHEDULER_PERSIST = True

# Schedule requests with a priority queue (the default).
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'

# Use the Redis-backed duplicate filter so requests are de-duplicated
# across ALL spider processes sharing the Redis queue.
# Fixed: was 'scrapy.dupefilter.RFPDupeFilter' (Scrapy's local, per-process
# filter, and a deprecated module path), which would let different workers
# re-crawl the same URLs.
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'

# Alternative: schedule requests with a FIFO queue.
# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderQueue'

# Alternative: schedule requests with a LIFO queue.
# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderStack'

# Maximum idle time (seconds) before closing, so spiders are not shut down
# while other workers are still filling the queue; also the initial wait on
# first start-up (the queue is empty then). Only applies to SpiderQueue and
# SpiderStack.
# SCHEDULER_IDLE_BEFORE_CLOSE = 10

# Item pipelines. RedisPipeline stores the crawled items and must run after
# every other pipeline, i.e. keep its number larger than the rest.
ITEM_PIPELINES = {
    # 'zhihu_spider.pipelines.aboutUrlsPipelines.AboutZhihuUserSpiderPipeline': 300,
    # 'zhihu_spider.pipelines.aboutUrlsPipelines.FileStorePipeline': 500,
    'zhihu_spider.pipelines.detailPipelines.ZhihuSpiderPipeline': 300,
    'zhihu_spider.pipelines.detailPipelines.MySQLStorePipeline': 500,
    'scrapy_redis.pipelines.RedisPipeline': 800,
    # 'zhihu_spider.basePipelines.BaseZhihuUserSpiderPipeline': 300,
    # 'zhihu_spider.basePipelines.MySQLStorePipeline': 800,
}

# Redis host and port (optional; these are the defaults, localhost:6379).
REDIS_HOST = 'localhost'
REDIS_PORT = 6379

# Redis connection URL (optional). When set it takes precedence and
# REDIS_HOST / REDIS_PORT are ignored.
# REDIS_URL = 'redis://user:pass@hostname:9001'
# ------------ end scrapy-redis -------------------------------------------


# ------------ graphite settings ---------------------------
# Collect crawler stats with the project's Redis-backed Graphite collector.
STATS_CLASS = 'zhihu_spider.statscol.graphite.RedisGraphiteStatsCollector'
# Graphite (Carbon) endpoint; 2003 is the plaintext-protocol port.
# NOTE(review): hard-coded LAN address — verify it matches the deployment.
GRAPHITE_HOST = '192.168.0.106'
GRAPHITE_PORT = 2003
# Presumably stat keys to exclude from reporting (empty = report all) —
# confirm against zhihu_spider.statscol.graphite.
GRAPHITE_IGNOREKEYS = []
