# -*- coding: utf-8 -*-

# Scrapy settings for GuaZi_Spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'GuaZi_Spider'

SPIDER_MODULES = ['GuaZi_Spider.spiders']
NEWSPIDER_MODULE = 'GuaZi_Spider.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'GuaZi_Spider (+http://www.yourdomain.com)'

# Deliberately do NOT obey robots.txt rules.
ROBOTSTXT_OBEY = False

# Maximum concurrent requests performed by Scrapy (Scrapy default: 16).
CONCURRENT_REQUESTS = 20

# Seconds the downloader waits before fetching the next page from the same
# site. Throttles the crawl and eases load on the target server — one of the
# anti-ban measures used by this project.
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16  # max simultaneous requests per single domain
#CONCURRENT_REQUESTS_PER_IP = 16  # max simultaneous requests per single IP; if non-zero, the per-domain limit above is ignored and limits apply per IP instead

# Disable Scrapy's cookie middleware. Per the original author, this MUST be
# False for cookies placed directly in the request headers to take effect —
# an easy setting to misread.
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Default headers attached to every outgoing request.
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
}

# Spider middleware hooks into Scrapy's spider-processing machinery: it can
# post-process responses delivered to spiders as well as the items and
# requests the spiders produce.
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'GuaZi_Spider.spider_middlewares.guazi_spider_middleware.GuaziSpiderMiddleware': 543,
}

# Downloader middlewares are activated by listing them here: a dict mapping
# middleware class path -> order (lower values run closer to the engine).
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    # Request headers: UA rotation and the antipas cookie (per original author)
    'GuaZi_Spider.downloader_middlewares.headers_middleware.HeadersMiddleware': 100,
    # Proxy configuration
    'GuaZi_Spider.downloader_middlewares.proxy_middleware.ProxyMiddleware': 300,
    # Site-specific business logic
    'GuaZi_Spider.downloader_middlewares.guazi_scrapy_middleware.GuaZiScrapyMiddleware': 400,
}


# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Each Item Pipeline component is a Python class implementing a simple method
# contract: it receives an item, runs its logic on it, and decides whether the
# item continues down the pipeline chain or is dropped.
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# NOTE(review): this assignment is shadowed by the scrapy-redis ITEM_PIPELINES
# redefinition near the bottom of this file — only the later dict takes effect.
# Values are execution order: lower numbers run first.
ITEM_PIPELINES = {
	'GuaZi_Spider.pipelines.GuaziImagePipeline': 200,
   'GuaZi_Spider.pipelines.GuaziSpiderPipeline': 300,
}

# AUTOTHROTTLE  — 自动限速 （反爬策略之一）
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5  #在高延迟的情况下设置的最大下载延迟 初始下载延迟时间(单位：秒)
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60 # 高并发请求时最大延迟时间(单位：秒)
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# 是否启用在本地缓存，如果开启会优先读取本地缓存，从而加快爬取速度，视情况而定
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

from datetime import datetime
import os

################################################   images settings begin   #####################################################
# Settings consumed by the image pipeline (see GuaziImagePipeline above).

# Days before a previously-downloaded image is considered stale and eligible
# for re-download. For serious volume the original author suggests a dedicated
# image server (FastDFS, TFS) instead of local disk.
IMAGES_EXPIRES = 90

# Thumbnail variants generated alongside each original image: name -> (w, h).
IMAGES_THUMBS = {
    'small': (120, 80),
    'big': (600, 400),
}

# Local directory where downloaded images are stored.
# On Linux deployments the original used an absolute path, e.g. '/opt/images'.
IMAGES_STORE = 'images'

# Create the store directory up front. exist_ok=True replaces the original
# `if not os.path.exists(...)` guard, which was a check-then-create race
# (another process could create the directory between the two calls).
os.makedirs(IMAGES_STORE, exist_ok=True)

################################################   images settings end   ###################################################

################################################   log settings begin   #######################################################
# Logging configuration.
# Levels from lowest to highest severity: DEBUG, INFO, WARNING, ERROR.
# LOG_LEVEL is the watermark: records at or above it are written.
# Production deployments typically run with LOG_LEVEL = "ERROR".

LOG_LEVEL = "INFO"
LOG_DIR = "log"

# Create the log directory up front. exist_ok=True replaces the original
# `if not os.path.exists(...)` guard, which was a check-then-create race.
os.makedirs(LOG_DIR, exist_ok=True)

today = datetime.now()

# One log file per day, named from the date at import time.
# NOTE(review): month/day are not zero-padded, so names do not sort
# lexicographically by date — confirm no external tooling relies on this
# exact pattern before changing it.
LOG_FILE = f"{LOG_DIR}/scrapy_{today.year}_{today.month}_{today.day}.log"
################################################   log settings end   #######################################################

# MySQL connection parameters for the result store.
# NOTE(review): credentials are hard-coded in settings — consider loading them
# from environment variables or a secrets store before deploying.
Conn_Config = {
    "host": '51try.top',
    "port": 3300,
    "user": 'spider',
    "password": '123456',
    "database": 'scrapy_spider',
    "charset": 'utf8mb4',
}

# Connection URL using the mysql-connector driver, which avoids the pitfalls
# the original author hit with pymysql: https://www.jianshu.com/p/e09eb9cc2830
# Requires: pip install mysql-connector
# (str.format ignores the extra 'charset' key in the mapping.)
MYSQL_STR = (
    "mysql+mysqlconnector://{user}:{password}@{host}:{port}/{database}"
    .format(**Conn_Config)
)

print(MYSQL_STR)


#########################   scrapy-redis 分布式配置  BEGIN   #############################

# 1. Redis connection parameters.
Redis_Config = {
    'host': '51try.top',
    'port': 6300,
    'password': '63796379',
    'db': 1,
}

# Supported URL shapes (from redis-py):
#   redis://[:password]@localhost:6379/0
#   rediss://[:password]@localhost:6379/0
#   unix://[:password]@/path/to/socket.sock?db=0
#
# The colon before the password is mandatory — without it redis raises:
# redis.exceptions.AuthenticationError: Authentication required.
REDIS_URL = "redis://:{password}@{host}:{port}/{db}".format(**Redis_Config)

print(REDIS_URL)


# 2. Swap Scrapy's default in-process scheduler for the Redis-based one from
# scrapy-redis, so the request queue lives in Redis.
SCHEDULER = 'scrapy_redis.scheduler.Scheduler'


# 3. Choose the request dedupe filter class based on the operating system the
# process is deployed on.
import platform
system = platform.system()

# Per the original author, the bloom-filter variant (bf_dupefilter) is more
# efficient.
SYSTEM_PROCESS_MAP = {
    "Windows": "scrapy_redis.bf_dupefilter.RFPDupeFilter",
    "Linux": "scrapy_redis.dupefilter.RFPDupeFilter",
    "Darwin": "scrapy_redis.bf_dupefilter.RFPDupeFilter",  # macOS
}

# Dedupe strategy.
# NOTE(review): on an OS not listed above this resolves to None — confirm that
# is intended, since Scrapy would then have no dupefilter class to load.
DUPEFILTER_CLASS = SYSTEM_PROCESS_MAP.get(system)

# 4. Redis-enabled item pipeline chain. This redefinition supersedes the
# ITEM_PIPELINES declared earlier in this file; values are execution order
# (lower runs first).
ITEM_PIPELINES = {
    # original project pipelines
    'GuaZi_Spider.pipelines.GuaziImagePipeline': 200,
    'GuaZi_Spider.pipelines.GuaziSpiderPipeline': 300,
    # scrapy-redis pipeline
    'scrapy_redis.pipelines.RedisPipeline': 350,
}

# 5. Whether to keep the scheduler's dedupe records in Redis when the crawl
# stops. True persists them, so a distributed crawl can resume without
# re-fetching already-seen requests.
SCHEDULER_PERSIST = True


#########################   scrapy-redis 分布式配置  END   #############################