# -*- coding: utf-8 -*-

# Scrapy settings for dingxiangyuan project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'dingxiangyuan'

SPIDER_MODULES = ['dingxiangyuan.spiders']
NEWSPIDER_MODULE = 'dingxiangyuan.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent.
# Static fallback UA; RandomUserAgentMiddleware (enabled in
# DOWNLOADER_MIDDLEWARES below) presumably rotates through USER_AGENT_LIST
# per request — confirm against middlewares.py.
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'

# Obey robots.txt rules (deliberately disabled for this crawl)
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 8

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 2
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers.
# Fix: reference USER_AGENT instead of repeating the identical literal, so the
# two settings can never silently drift apart.
DEFAULT_REQUEST_HEADERS = {
    'User-Agent': USER_AGENT,
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'dingxiangyuan.middlewares.DingxiangyuanSpiderMiddleware': 543,
# }

# Pool of desktop-browser user-agent strings. Presumably consumed by
# dingxiangyuan.middlewares.RandomUserAgentMiddleware (enabled in
# DOWNLOADER_MIDDLEWARES below) to rotate the UA per request — TODO confirm
# against middlewares.py.
USER_AGENT_LIST = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]

# Pool of HTTP proxy endpoints (scheme://host:port). Presumably used by a
# downloader middleware to rotate the outgoing proxy — TODO confirm against
# middlewares.py. NOTE(review): free proxies like these are ephemeral and
# are very likely stale; expect connection failures.
IPLIST = [
    'http://223.247.93.90:4216',
    'http://58.220.95.30:10174',
    'http://180.123.169.40:4216',
    'http://106.110.96.70:4216',
    'http://121.233.86.231:4216',
    'http://117.87.208.10:4216',
    'http://58.220.95.42:10174',
    'http://183.167.217.152:63000',
    'http://47.114.45.244:8101',
    'http://221.226.195.229:4216',
    'http://119.134.110.144:8118',
]

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# (Lower order number = closer to the engine = runs earlier in process_request.)
DOWNLOADER_MIDDLEWARES = {
    'dingxiangyuan.middlewares.DingxiangyuanDownloaderMiddleware': 543,
    # Project middleware that presumably picks a random UA (likely from
    # USER_AGENT_LIST above) — confirm in middlewares.py.
    'dingxiangyuan.middlewares.RandomUserAgentMiddleware': 400,
    # Disable Scrapy's built-in user-agent middleware so it cannot overwrite
    # the randomly chosen UA.
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#  'dingxiangyuan.extensions.MyExtension': 200,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # Persists scraped items to PostgreSQL; presumably reads the
    # POSTGRESQL_* / DATABASE_ENGINE settings defined further below.
    'dingxiangyuan.pipelines.PostgresSQLPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 1
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 10
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 5.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# Only log warnings and above (Scrapy's default is DEBUG, which is noisy).
LOG_LEVEL = 'WARNING'

# Do not follow HTTP 3xx redirects (Scrapy follows them by default).
REDIRECT_ENABLED = False

# PostgreSQL connection parameters, presumably read by
# dingxiangyuan.pipelines.PostgresSQLPipeline — confirm in pipelines.py.
# NOTE(review): credentials are hard-coded in source; consider loading them
# from environment variables instead.
POSTGRESQL_HOST = "127.0.0.1"
POSTGRESQL_DATABASE = "dingxiangyuan"
POSTGRESQL_USER = "postgres"
POSTGRESQL_PASSWORD = "0000"
POSTGRESQL_PORT = "5432"

# SQLAlchemy-style connection URL.
# Fix: build it from the POSTGRESQL_* parts above instead of duplicating
# host/port/user/password in a second literal, so the two representations
# can never disagree.
DATABASE_ENGINE = (
    f'postgresql://{POSTGRESQL_USER}:{POSTGRESQL_PASSWORD}'
    f'@{POSTGRESQL_HOST}:{POSTGRESQL_PORT}/{POSTGRESQL_DATABASE}'
)

# Forum boards to crawl on dxy.cn: start URL -> [numeric board id, board name].
BOARD_MAP = {
    # Clinical boards
    'http://cardiovascular.dxy.cn/bbs/board/47': [1, '心血管'],  # cardiovascular: heart disease / cerebrovascular
    'http://www.dxy.cn/bbs/board/87': [2, '肿瘤'],  # oncology: malignant tumors
    'http://chest.dxy.cn/bbs/board/58': [3, '呼吸胸外'],  # respiratory / thoracic surgery
    'http://www.dxy.cn/bbs/board/112': [4, '危重急救'],  # critical care / emergency

    # Humanities / administration boards
    'http://www.dxy.cn/bbs/board/53':[5, '卫生法律人文'],  # health law & humanities
    'http://www.dxy.cn/bbs/board/239': [6, '医学哲学和医学史'],  # medical philosophy & history
    'http://www.dxy.cn/bbs/board/144': [7, '卫生事业与医院管理'],  # public health & hospital administration

    # Research boards
    'http://xdrug.dxy.cn/bbs/board/114': [8, '新药信息'],  # new drug information
    'http://www.dxy.cn/bbs/board/199': [9, '实验动物与组织学'],  # lab animals & histology
    'http://www.dxy.cn/bbs/board/73': [10, '生物信息学'],  # bioinformatics
    'http://www.dxy.cn/bbs/board/126': [11, '预防医学'],  # preventive medicine

    # Career / community boards
    'http://www.dxy.cn/bbs/board/45': [12, '论文投稿与写作'],  # paper submission & writing
    'http://www.dxy.cn/bbs/board/82': [13, '临床执考'],  # clinical licensing exam
    'http://www.dxy.cn/bbs/board/6': [14, '检索知识'],  # literature-search knowledge
    'http://happy.dxy.cn/bbs/board/15': [15, '心情驿站']  # mood / off-topic lounge
}


# Subset of boards keyed by numeric id: id -> [board name, start URL].
BOARD_ID_MAP = {
    1: ['心血管', 'http://cardiovascular.dxy.cn/bbs/board/47'],
    2: ['肿瘤医学', 'http://www.dxy.cn/bbs/board/87'],
    3: ['卫生法律人文', 'http://www.dxy.cn/bbs/board/53'],
    4: ['医学哲学和医学史', 'http://www.dxy.cn/bbs/board/239'],
}

# Reverse lookup: board name -> numeric id.
# Fix: derive it from BOARD_ID_MAP instead of maintaining a second
# hand-written literal, so the two mappings can never drift apart.
BOARD_ID_MAP2 = {name: board_id for board_id, (name, _url) in BOARD_ID_MAP.items()}

# #################################### Redis-backed dedup & scheduling (scrapy-redis) ############################################
REDIS_HOST = '127.0.0.1'  # Redis host
REDIS_PORT = 6379  # Redis port
# REDIS_PARAMS = {'password':'0000'}                 # Extra Redis connection kwargs (default: REDIS_PARAMS = {'socket_timeout': 30,'socket_connect_timeout': 30,'retry_on_timeout': True,'encoding': REDIS_ENCODING,})
REDIS_ENCODING = "utf-8"

# Use the scrapy-redis scheduler so pending requests live in Redis and can be
# shared across spider processes.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Project-local Redis-based duplicate filter.
DUPEFILTER_CLASS = 'dingxiangyuan.dupeFilter.RedisDupeFilter'
# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'  # Queue class; default PriorityQueue (sorted set), alternatives: FifoQueue (list), LifoQueue (list)
SCHEDULER_QUEUE_KEY = '%(spider)s:requests'  # Redis key under which the scheduler stores pending requests
SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"  # Serializer for requests stored in Redis (pickle-based by default)
SCHEDULER_PERSIST = True  # Keep queue and dedup records in Redis on close (True = keep, False = flush)
SCHEDULER_FLUSH_ON_START = False  # Flush queue and dedup records on start (True = flush, False = keep)
SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter'  # Redis key for the dedup fingerprint set

# NOTE(review): stray import at the bottom of the settings module — nothing
# here references Scheduler (the SCHEDULER setting above is a string path),
# and imports belong at the top of the file; consider removing it.
from scrapy_redis.scheduler import Scheduler
