# -*- coding: utf-8 -*-
# @author Funsion Wu
#
# Scrapy project settings: distributed crawling via scrapy-redis,
# item persistence in MongoDB, and image downloading/thumbnailing.

from datetime import datetime

# --- Core crawler identity and logging ---
BOT_NAME = 'funsion'
LOG_LEVEL = 'DEBUG'  # NOTE: switch to LOG_LEVEL = 'INFO' in production
SPIDER_MODULES = ['funsion.spiders']
NEWSPIDER_MODULE = 'funsion.spiders'

# --- Crawl politeness and limits ---
DEPTH_LIMIT = 5
ROBOTSTXT_OBEY = True
DOWNLOAD_DELAY = 0.1

# --- Downloader middlewares ---
DOWNLOADER_MIDDLEWARES = {
    # Rotate the User-Agent header on every outgoing request.
    'funsion.middleware.random_useragent.RandomUserAgentMiddleware': 400,
}

# --- Item pipelines (lower priority number runs earlier) ---
ITEM_PIPELINES = {
    # In-process deduplication; may be disabled when scrapy-redis is enabled.
    'funsion.pipelines.duplicate_pipeline.DuplicatesPipeline': 100,
    # Cross-process deduplication for the distributed setup.
    'scrapy_redis.pipelines.RedisPipeline': 300,  # pip install scrapy-redis
    # Image handling must run early because it mutates item fields.
    'funsion.pipelines.images_pipeline.ImagesPipeline': 500,
    # MongoDB must run last so fully populated items are persisted.
    'funsion.pipelines.scrapy_mongodb.MongoDBPipeline': 800,  # patched version
    # 'scrapy_mongodb.MongoDBPipeline': 800,  # upstream: pip install scrapy-mongodb
}

# --- MongoDB storage ---
MONGODB_URI = 'mongodb://localhost:27017'
MONGODB_DATABASE = 'funsion'
MONGODB_COLLECTION = 'my_items'
MONGODB_UNIQUE_KEY = 'source_url'
MONGODB_ADD_TIMESTAMP = True

# --- Distributed crawling: schedule/store the request queue in Redis ---
SCHEDULER = "scrapy_redis.scheduler.Scheduler"

# Ensure all spiders share the same duplicates filter through Redis.
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# Host and port used when connecting to Redis (optional).
REDIS_HOST = '192.168.97.180'
REDIS_PORT = 6380

# Redis key under which the item pipeline serializes and stores items.
REDIS_ITEMS_KEY = '%(spider)s:items'

# Keep Redis queues between runs so crawls can be paused/resumed;
# enabling this also provides Redis-backed deduplication.
SCHEDULER_PERSIST = True

# --- Image storage ---
# Storage directory keyed by bot name plus current year-month.
STORE_DIR = '{0}_{1}'.format(BOT_NAME, datetime.now().strftime("%Y%m"))
IMAGES_STORE = '/home/www/data/{0}/'.format(STORE_DIR)  # local image download path
REMOTE_URL = 'http://scrapy.funsion.com/{0}/'.format(STORE_DIR)
IMAGES_EXPIRES = 90  # cached images expire after 90 days

IMAGES_MIN_HEIGHT = 30  # drop images smaller than this
IMAGES_MIN_WIDTH = 30
IMAGES_THUMBS = {  # thumbnail sizes to generate
    'small': (123, 123),
    'middle': (390, 390),
}

# --- Misc downloader tuning ---
AJAXCRAWL_ENABLED = False  # disable AJAX page crawling
DOWNLOAD_TIMEOUT = 10      # download timeout (seconds)
RETRY_ENABLED = True       # enable download retries
COOKIES_ENABLED = False    # disable cookies
CONCURRENT_REQUESTS = 50   # global concurrency limit
