# Scrapy settings for spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

"""Global configuration file for the Scrapy project."""

import datetime
import scrapy
from scrapy_redis.spiders import RedisSpider

# Whether to enable redis-backed distributed crawling. When disabled,
# spiders extend scrapy.Spider; when enabled, they extend RedisSpider.
REDIS_ON = False
EXTENDS_SPIDER = RedisSpider if REDIS_ON else scrapy.Spider

# Project name, used for the crawler's User-Agent and similar settings.
BOT_NAME = 'spider'

# NOTE: the short `SPIDER_MODULES = ['spider.spiders']` assignment that used
# to live here was dead code — it is unconditionally overridden by the full
# per-task module list assigned further down in this file — so it was removed.
NEWSPIDER_MODULE = 'spider.spiders'

# Today's date as 'YYYY-MM-DD'; used to stamp log and export file names.
today = datetime.datetime.now()
TODAY_STR = today.strftime('%Y-%m-%d')

# Logging configuration; leave these commented out to disable log files.
# LOG_LEVEL = 'DEBUG'
# LOG_FILE = 'log/scrapy_{}.log'.format(TODAY_STR)

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'spider (+http://www.yourdomain.com)'

# Whether to obey the target site's robots.txt rules.
ROBOTSTXT_OBEY = False

# Export directory and file-name template. The first placeholder is formatted
# with a literal '{}' so the spider name can be substituted in later.
DOWNLOAD_FILE_NAME = 'download/{}_{}.csv'.format('{}', TODAY_STR)

# Whether to crawl incrementally; incremental crawling downloads only
# content updated today.
# DOWNLOAD_TODAY = False
#
# Whether to filter duplicate data: for the same article appearing on
# different pages, keep only one record.
DEDUPLICATED = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'spider.middlewares.SpiderSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    'spider.middlewares.SpiderDownloaderMiddleware': 543,
# }

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Full list of spider modules, grouped by task number.
SPIDER_MODULES = [
    # task1
    'spider.spiders.task1.elcano',
    'spider.spiders.task1.elcanoBlog',
    'spider.spiders.task1.elcanoBrussels',
    'spider.spiders.task1.elcanoEvent',
    'spider.spiders.task1.elcanoNotas',
    'spider.spiders.task1.elcanoReports',

    # task2
    'spider.spiders.task2.iss',

    # task3
    'spider.spiders.task3.ceps',
    'spider.spiders.task3.econpol',
    'spider.spiders.task3.greengrowthknowledge',

    # task4
    # site unreachable — no spider implemented

    # task5
    'spider.spiders.task5.epc',
    'spider.spiders.task5.publication',

    # task6
    'spider.spiders.task6.schuman',

    # task7
    'spider.spiders.task7.kas',

    # task8
    'spider.spiders.task8.ecdpm',
    'spider.spiders.task8.ecdpm2',

    # task9
    'spider.spiders.task9.giga',
    'spider.spiders.task9.giga2',

    # task10
    'spider.spiders.task10.dgap',
]

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# Pipelines that post-process items after a spider yields them
# (lower number = runs earlier).
ITEM_PIPELINES = {
    'spider.pipelines.filesPipeline.FilesPipeline': 1,
    # 'scrapy_redis.pipelines.RedisPipeline': 300,
    'spider.pipelines.mysqlPipeline.MysqlPipeline': 400,
    # 'spider.pipelines.issPipeline': 300,
}

if REDIS_ON:
    # Also push scraped items into redis when distributed crawling is on.
    ITEM_PIPELINES['scrapy_redis.pipelines.RedisPipeline'] = 300
    # Redis master connection settings.
    REDIS_HOST = '127.0.0.1'
    REDIS_PORT = 6379

    # --- Scheduler configuration ---

    # Use the scrapy-redis scheduler so pending requests are shared via redis.
    SCHEDULER = "scrapy_redis.scheduler.Scheduler"

    # Request queue type: LIFO (stack). NOTE: scrapy-redis's own default is
    # the priority queue; this setting overrides it.
    SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.LifoQueue'

    # Redis key under which each spider's pending requests are stored
    # ('%(spider)s' is substituted with the spider name).
    SCHEDULER_QUEUE_KEY = '%(spider)s:requests'

    # Whether to keep the scheduler queue and dedup records when closing.
    SCHEDULER_PERSIST = True

    # Whether to flush the scheduler queue and dedup records before starting.
    SCHEDULER_FLUSH_ON_START = False

    # Maximum seconds to wait when fetching from an empty scheduler queue.
    SCHEDULER_IDLE_BEFORE_CLOSE = 10

    # Redis key used to store each spider's dedup fingerprints.
    SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter'

    # --- Deduplication configuration ---

    # Class implementing the request-fingerprint dedup rules.
    DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'

    # Dedup debug mode.
    DUPEFILTER_DEBUG = False

# Directory where downloaded PDF files are stored.
# NOTE(review): hard-coded Windows placeholder path ('D:\xxx\...') — adjust
# per deployment, or switch to the commented /tmp variant on Linux.
# FILES_STORE = '/tmp/{}/'.format(TODAY_STR)
FILES_STORE = 'D:\\xxx\\{}\\'.format(TODAY_STR)

# MySQL connection settings.
# NOTE(review): credentials are hard-coded in the repository; prefer loading
# them from the environment or a non-committed local settings file.
DB_HOST = '127.0.0.1'
DB_PORT = 3306
DB_USER = 'root'
DB_PASSWORD = '111111'
DB_DATABASE = 'UIBE_DC'

# Connection character set.
DB_CHARSET = "utf8"

# Return results as unicode strings.
DB_USE_UNICODE = True

# mincached: idle connections opened at startup (default 0 = none at start).
DB_MIN_CACHED = 1

# maxcached: maximum number of idle connections kept in the pool
# (default 0 = no limit on pool size).
DB_MAX_CACHED = 10

# maxshared: maximum number of shared connections (default 0 = all
# connections are dedicated). Once reached, connections requested as
# shareable will be shared.
DB_MAX_SHARED = 20

# maxconnections: maximum number of connections in the pool
# (default 0 = unlimited).
DB_MAX_CONNECTIONS = 50
# Backward-compatible alias: the original setting name carried a typo
# ("CONNECYIONS"); kept so existing consumers continue to work.
DB_MAX_CONNECYIONS = DB_MAX_CONNECTIONS

# blocking: behavior when the pool is exhausted (default 0/False = raise an
# error; truthy = block until a connection is released and assigned).
DB_BLOCKING = True

# maxusage: maximum number of reuses of a single connection (default 0 or
# False = unlimited). When reached, the connection is closed and reopened.
DB_MAX_USAGE = 0

# setsession: optional list of SQL commands used to prepare each session,
# e.g. ["set datestyle to german", ...].
DB_SET_SESSION = None

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
