# -*- coding: utf-8 -*-

# Scrapy settings for SFPM_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import os
import random
import datetime

BOT_NAME = 'SFPM_spider'

SPIDER_MODULES = ['SFPM_spider.spiders']
NEWSPIDER_MODULE = 'SFPM_spider.spiders'

# Request dedup filter: scrapy-redis fingerprint-based duplicate filter.
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Scheduler: redis-backed request/URL scheduling queue.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Persist the scheduler queue and dupefilter in redis across runs
# (allows pausing/resuming the crawl).
SCHEDULER_PERSIST = True

# SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderPriorityQueue"  # priority queue
# SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderQueue"  # FIFO queue
# SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderStack"  # LIFO stack

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'SFPM_spider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 6

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#
# NOTE: the previous value, random.uniform(1.3, 1.5), was evaluated exactly
# once at settings-import time, yielding a single fixed delay rather than the
# per-request jitter it suggests. Scrapy already randomizes the effective
# delay per request (0.5x-1.5x of DOWNLOAD_DELAY) via the default-enabled
# RANDOMIZE_DOWNLOAD_DELAY setting, so a plain constant gives the intended
# behaviour.
DOWNLOAD_DELAY = 1.4
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'SFPM_spider.middlewares.FdcSpiderSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html

DOWNLOADER_MIDDLEWARES = {
    # Disable the built-in retry middleware; CustomRetryMiddleware takes over
    # its priority slot (500).
    'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
    "SFPM_spider.middlewares.CustomRetryMiddleware": 500,

    'SFPM_spider.middlewares.UserAgentMiddleware': 544,
    # 'SFPM_spider.middlewares.InnerNetProxyMiddleware': 545,
    # 'SFPM_spider.middlewares.OuterNetProxyMiddleware': 545,  # use proxies held in redis; must enable RedisConnPipeline as well
    'SFPM_spider.middlewares.SfpmCookiesMiddleware': 546,  # cookies handling for the judicial-auction (sifa paimai) spider
    # 'SFPM_spider.middlewares.SfpmProxyMiddleware': 545,  # proxies for the judicial-auction spider; must enable RedisConnPipeline as well
    # 'SFPM_spider.middlewares.DailiyunSfpmProxyMiddleware': 545,  # proxies (Dailiyun provider) for the judicial-auction spider; must enable RedisConnPipeline as well
    'SFPM_spider.middlewares.KuaidailiSfpmProxyMiddleware': 545,  # proxies (Kuaidaili provider) for the judicial-auction spider; must enable RedisConnPipeline as well
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
EXTENSIONS = {
    # 'scrapy.extensions.telnet.TelnetConsole': None,
    # Custom extension that closes the idle spider (configured via EXT_ENABLED
    # and IDLE_NUM further down in this file).
    # NOTE(review): module path is spelled 'extentions' -- presumably matches
    # the actual module name in this project; confirm before "fixing".
    'SFPM_spider.extentions.CustomRedisSpiderIdleClosedExtensions': 500,
}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'SFPM_spider.pipelines.RedisConnPipeline': 299,  # redis connection used by the proxy middlewares; enable together with a XXXProxyMiddleware
    'SFPM_spider.pipelines.FdcSpiderPipeline': 300,

    'SFPM_spider.pipelines.MongoClientPipeline': 350,  # persist items to mongo
    # 'scrapy_redis.pipelines.RedisPipeline': 400,  # pipeline that stores items in redis

    # NOTE(review): SfpmCasePipeline shares priority 300 with FdcSpiderPipeline
    # above, so their relative execution order is unspecified -- give them
    # distinct priorities if ordering between the two matters.
    'SFPM_spider.pipelines.SfpmCasePipeline': 300,
    'SFPM_spider.pipelines.SfpmCaseImagesPipeline': 301,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# When enabled, download delays adapt to observed server latency;
# DOWNLOAD_DELAY above acts as the lower bound for the throttled delay.
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'


# Logging: build a per-day log file path (only used if LOG_FILE is enabled).
today = datetime.datetime.now()
log_path = "%s/logs/%s_%s_%s.log" % (BOT_NAME, today.year, today.month, today.day)
# LOG_FILE = log_path
# LOG_LEVEL = "WARNING"
LOG_LEVEL = "DEBUG"

# User-Agent pool -- presumably rotated per-request by the UserAgentMiddleware
# enabled above; confirm against the middleware implementation.
USER_AGENTS = [
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
]

# Proxy IP addresses (for the bastion-host internal network) -- presumably
# consumed by InnerNetProxyMiddleware when that middleware is enabled; confirm.
# An obsolete copy of this list that was parked in a no-op triple-quoted
# string expression (dead code) has been removed.
PROXY = ['192.168.1.162:65500', '192.168.1.164:65500', '192.168.1.166:65500', '192.168.1.169:65500',
         '192.168.1.182:65500', '192.168.1.183:65500', '192.168.1.185:65500', '192.168.1.186:65500',
         '192.168.1.187:65500', '192.168.1.188:65500', '192.168.1.189:65500', '192.168.1.190:65500',
         '192.168.1.191:65500', '192.168.1.192:65500', '192.168.1.193:65500', '192.168.1.194:65500',
         '192.168.1.195:65500', '192.168.1.196:65500', '192.168.1.197:65500', '192.168.1.198:65500',
         '192.168.1.199:65500', '192.168.1.200:65500', ]
# Proxy API endpoint (returns a proxy address to use).
PROXY_URL = 'http://183.230.7.247:10888/getip?addr=&tof=f'

# Whether the retry middleware is enabled (default: True).
# RETRY_ENABLED = False
# Maximum number of retries per request.
RETRY_TIMES = 10
# HTTP response codes that trigger a retry.
# NOTE(review): 400 and 404 are normally permanent client errors and unusual
# retry candidates -- presumably the target site returns them transiently
# (e.g. when blocking); confirm before removing.
RETRY_HTTP_CODES = [500, 502, 503, 504, 400, 404, 408, 407]

# Download timeout in seconds.
DOWNLOAD_TIMEOUT = 10

# MONGODB connection settings.
# (The explanation below previously lived in a no-op triple-quoted string
# expression; converted to real comments.)
# 1. An empty replicaSet 'name' disables the replica-set (cluster) configuration.
# 2. Empty 'user' and 'pwd' skip password authentication.
# NOTE(review): 'port' values are strings here -- presumably the consuming
# pipeline (MongoClientPipeline) converts them to int; confirm.
MONGODB = {
    'host': '192.168.5.236',
    'port': '27017',
    'user': '',
    'pwd': '',
    'replicaSet': {
        'name': '',
        "members": [
            {
                "host": "localhost",
                "port": "27017"
            },
        ]
    }
}

# Redis connection (option 1: single connection URL, db 6).
REDIS_URL = "redis://127.0.0.1:6379/6"
# Redis connection (option 2: separate host/port).
# REDIS_HOST = "127.0.0.1"
# REDIS_PORT = 6379

# Enable the custom idle-close extension (see EXTENSIONS above).
EXT_ENABLED = True
# Allowed idle duration:
# the spider process is closed after IDLE_NUM * 5 seconds of idleness.
IDLE_NUM = 120

# Directory where downloaded images are stored: an 'images' folder that sits
# next to this settings module.
IMAGES_STORE = os.path.join(os.path.dirname(__file__), 'images')
# Item fields consumed/produced by the images pipeline
# (Scrapy defaults: image_urls / images).
IMAGES_URLS_FIELD = 'imageUrls'
IMAGES_RESULT_FIELD = 'imagesInfo'
# Image expiration period in days (Scrapy default: 90).
IMAGES_EXPIRES = 90
# Filter out images smaller than these dimensions.
# IMAGES_MIN_HEIGHT = 110
# IMAGES_MIN_WIDTH = 110
# Thumbnail generation: key = sub-folder name, value = (width, height).
# IMAGES_THUMBS = {
#     'small': (50, 50),
#     'big': (270, 270),
# }

"""
司法拍卖
1.【注意】如果SFPM_PROVINCE参数被注释，进行全国爬取;SFPM_PROVINCE为空列表，不进行爬取
2.【注意】如果只设置省份(直辖市)，不设置城市，默认爬取该省份(直辖市)的所有城市
3.【注意】如要设置城市，需先设置(全国爬取时不需要设置)该城市所在 省份(直辖市)
4.【注意】不要带 省、市 等后缀(如四川省，应设置为四川)
"""
# 设置 省份(直辖市)
SFPM_PROVINCE = ['重庆', '四川']
# 设置 城市
# SFPM_CITY = {'重庆': ['重庆', ],
#              '四川': ['成都', ],
#              }
# 开启redis案例增量去重(默认开启)
SFPM_CASE_REDIS = True
# 指定需要增量爬取的省份
SFPM_FILTER_PROVINCE = ['全国']
# 指定需要增量爬取的城市，不建议设置
# SFPM_FILTER_CITY = []