# Scrapy settings for scrapy_demo_test project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html


# Project identity: Scrapy uses BOT_NAME in log output and as the default
# User-Agent token (unless USER_AGENT is set explicitly, as it is below).
BOT_NAME = "scrapy_demo_test"

# Where Scrapy discovers existing spiders, and the module in which
# `scrapy genspider` creates new ones.
SPIDER_MODULES = ["scrapy_demo_test.spiders"]
NEWSPIDER_MODULE = "scrapy_demo_test.spiders"


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = "scrapy_demo_test (+http://www.yourdomain.com)"

# Obey robots.txt rules
#ROBOTSTXT_OBEY = True
# NOTE: ROBOTSTXT_OBEY = True is left commented out on purpose — with it
# enabled, pages whose robots.txt forbids crawling could not be fetched.

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#    "Accept-Language": "en",
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    "scrapy_demo_test.middlewares.ScrapyDemoTestSpiderMiddleware": 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    "scrapy_demo_test.middlewares.ScrapyDemoTestDownloaderMiddleware": 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    "scrapy.extensions.telnet.TelnetConsole": None,
#}

# To enable a pipeline, uncomment ITEM_PIPELINES below and register the
# pipeline's dotted path with a priority number (lower values run first).
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# Active item pipelines: dotted path -> order value (0-1000, lower runs
# first). Only the DD1 pipeline is enabled; earlier variants are kept
# commented out for reference.
ITEM_PIPELINES = {
    # "scrapy_demo_test.pipelines.ScrapyDemoTestPipeline": 300,
    # "scrapy_demo_test.pipelines.scrapy_pipeline4dd.Pipelinedemo4DD": 300,
    "scrapy_demo_test.pipelines.scrapy_pipeline4dd1.Pipelinedemo4DD1": 301,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
# Use the asyncio-based Twisted reactor (needed for asyncio/coroutine support).
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
# Export feeds as UTF-8 so non-ASCII characters are written verbatim
# instead of being escaped.
FEED_EXPORT_ENCODING = "utf-8"

# Database connection settings (read by the MySQL item pipeline).
# Each value can be overridden through an environment variable of the same
# name; the literals below are only fallback defaults, so existing behavior
# is unchanged when no variables are set.
# SECURITY NOTE(review): a real password is committed here in plaintext —
# prefer supplying MYSQLDB_PASSWORD via the environment and rotating this one.
import os

MYSQLDB_HOST = os.environ.get('MYSQLDB_HOST', '127.0.0.1')
MYSQLDB_PORT = int(os.environ.get('MYSQLDB_PORT', '3306'))  # server port
MYSQLDB_USER = os.environ.get('MYSQLDB_USER', 'root')
MYSQLDB_PASSWORD = os.environ.get('MYSQLDB_PASSWORD', 'tesT#08$')
MYSQLDB_NAME = os.environ.get('MYSQLDB_NAME', 'spider00')  # database (schema) name
MYSQLDB_CHARSET = os.environ.get('MYSQLDB_CHARSET', 'utf8mb4')

# Logging configuration
LOG_ENABLED = True      # enable Scrapy's logging system (default: True)
LOG_LEVEL = 'DEBUG'     # minimum level emitted (DEBUG / INFO / WARNING / ERROR)
LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'  # timestamp, logger name, level, message
# LOG_FILE = 'logs/scrapy.log'  # path of the log file; while commented out no
#                               # file is written and log output goes to the console
LOG_STDOUT = False      # keep False so print() output stays on the console;
                        # when True, stdout (including print) is redirected into
                        # the log under a [stdout] tag instead of being displayed
LOG_ENCODING = 'utf-8'  # encoding used when writing the log file (default: utf-8)


# Douban-example overrides ##################################################################################################
# NOTE: being at the bottom of the file, these assignments take effect over
# the commented template defaults earlier in this file.
# Browser-like User-Agent so requests are not rejected as an obvious bot.
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
ROBOTSTXT_OBEY = False  # do not obey robots.txt rules
DOWNLOAD_DELAY = 2      # base delay between requests, to avoid hitting the site too fast
# Enable the AutoThrottle extension: it adjusts the effective delay
# dynamically between the start and max values based on server latency.
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 2
AUTOTHROTTLE_MAX_DELAY = 5
# ###########################################################################################################################
