# -*- coding: utf-8 -*-
# Scrapy settings for the NewsSpider project.
import os

BOT_NAME = 'NewsSpider'
LOG_LEVEL = 'WARNING'

SPIDER_MODULES = ['NewsSpider.spiders']
NEWSPIDER_MODULE = 'NewsSpider.spiders'

# Custom Scrapy commands: create a `commands` package (sibling of the
# `spiders` package); each file name inside it becomes a command name.
# Here `scrapy crawlall` reads every registered spider name and runs
# `scrapy crawl <name>` for each of them.
# COMMANDS_MODULE = '<project_name>.<commands_package>'
COMMANDS_MODULE = 'NewsSpider.commands'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Cookies are enabled by default in Scrapy; disabled here so spiders do not
# carry session cookies across requests. Cookies can still be passed
# explicitly per request (Request(cookies=...)) when a site requires them.
COOKIES_ENABLED = False

# Pool of user-agent strings to rotate through (currently empty).
# NOTE(review): lowercase name is unconventional for a Scrapy setting; kept
# as-is because downstream middleware may look it up by this exact name.
user_agent_list = []

# User-agent selection strategy: random / chrome / firefox / google / safari
RANDOM_UA_TYPE = 'random'

# ---- MongoDB configuration ----
MONGO_URI = 'mongodb://localhost:27017'
# MONGO_DB = 'Images'
MONGO_DB = 'Spider'

# ---- Redis configuration ----
# NOTE(review): an old comment said this pointed at the ".199" proxy-pool
# host; the value below is localhost — confirm which instance is intended.
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_PASSWORD = None
REDIS_DB = 0
# Redis key used for deduplication of "wenshu" (court-document) records.
WENSHU_QUCHONG = 'wenshuquchong'
# decode_responses=True makes redis-py return str instead of raw bytes.
DECODE_RESPONSES = True

# ---- Proxy-pool Redis configuration ----
PROXY_REDIS_HOST = '127.0.0.1'
PROXY_REDIS_PORT = 6379
PROXY_REDIS_PASSWORD = None
PROXY_REDIS_DB = 0
PROXY_DECODE_RESPONSES = True


# ---- Local Redis configuration ----
LOCAL_REDIS_HOST = '127.0.0.1'
LOCAL_REDIS_PORT = 6379
LOCAL_REDIS_PASSWORD = None
LOCAL_REDIS_DB = 0
LOCAL_DECODE_RESPONSES = True


# ---- Local MySQL configuration ----
MYSQL_HOST = '127.0.0.1'
MYSQL_PORT = 3306
MYSQL_DATABASE = 'spider'
MYSQL_USER = 'root'
MYSQL_PASSWORD = '123456'
MYSQL_USE_UNICODE = True
MYSQL_CHARSET = 'utf8mb4'


# Store downloaded images under <project dir>/images.
project_dir = os.path.abspath(os.path.dirname(__file__))
IMAGES_STORE = os.path.join(project_dir, 'images')

# ---- Retry settings ----
# Enable the retry middleware (default: True).
# RETRY_ENABLED = True

# Number of retries (default: 2).
# RETRY_TIMES = 2

# HTTP status codes that trigger a retry (default: [500, 503, 504, 400, 408]).
# RETRY_HTTP_CODES = [500, 503, 504, 400, 408]

# Priority adjustment of a retry request relative to the original (default: -1).
# RETRY_PRIORITY_ADJUST = -1

# ---- Splash downloader middleware ----
# SPLASH_URL = 'http://192.168.99.100:8050'

# DOWNLOADER_MIDDLEWARES = {
#     'scrapy_splash.SplashCookiesMiddleware': 723,
#     'scrapy_splash.SplashMiddleware': 725,
#     'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
# }

# Splash-aware duplicate filter.
# DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
# Splash-aware HTTP cache storage.
# HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'

# If set, the image pipeline reads image URLs from this item field first.
# IMAGES_URLS_FIELD = ''

# ---- Scrapy tuning ----
# Maximum concurrent requests performed by Scrapy (default: 16).
CONCURRENT_REQUESTS = 32

# Maximum time (seconds) to wait for a download before timing out.
DOWNLOAD_TIMEOUT = 6

# Delay between requests.
# DOWNLOAD_DELAY = 0.5

# Maximum concurrent requests per domain.
# CONCURRENT_REQUESTS_PER_DOMAIN = 16

# Maximum concurrent requests per IP.
# CONCURRENT_REQUESTS_PER_IP = 16


# ---- AutoThrottle extension (disabled by default) ----
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html

# AUTOTHROTTLE_ENABLED = True
# Initial download delay.
# AUTOTHROTTLE_START_DELAY = 5

# Maximum download delay under high latency.
# AUTOTHROTTLE_MAX_DELAY = 60

# Average number of requests Scrapy should send in parallel to each remote server.
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0

# Show throttling stats for every response received.
# AUTOTHROTTLE_DEBUG = False

# ---- HTTP caching (disabled by default) ----
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# ---- Logging ----

# LOG_ENCODING (default: 'utf-8') — encoding used for logging output.

# File name to write log messages to.
# LOG_FILE = "bilibililog.log"

# Minimum severity to record; messages at or above this level are kept.

# LOG_LEVEL = 'INFO'
# LOG_LEVEL = 'ERROR'
# LOG_LEVEL = 'DEBUG'
# LOG_LEVEL = 'CRITICAL'