# -*- coding: utf-8 -*-

# Scrapy settings for FDC_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import os
import random
import datetime

BOT_NAME = 'FDC_spider'

SPIDER_MODULES = ['FDC_spider.spiders']
NEWSPIDER_MODULE = 'FDC_spider.spiders'

# Request de-duplication filter (scrapy-redis based)
# DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Scheduler (URL scheduler queue)
# SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Persist the URL scheduler queue across runs
# SCHEDULER_PERSIST = True
# Whether to flush the scheduler queue and dupefilter records before starting;
# True = flush, False = keep; default is False
# SCHEDULER_FLUSH_ON_START = True

# SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderPriorityQueue"  # priority queue
# SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderQueue"  # FIFO queue
# SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderStack"  # LIFO stack

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'FDC_spider (+http://www.yourdomain.com)'

# Obey robots.txt rules (deliberately disabled for this project)
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16);
# lowered to 8 to reduce load on the target sites
CONCURRENT_REQUESTS = 8

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = random.uniform(1, 1.5)
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# COOKIES_DEBUG = True

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'FDC_spider.middlewares.FdcSpiderSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html

DOWNLOADER_MIDDLEWARES = {
    # Disable Scrapy's built-in retry middleware and replace it with the
    # project's custom one at the same priority slot.
    'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
    "FDC_spider.middlewares.CustomRetryMiddleware": 500,

    # 'FDC_spider.middlewares.FdcSpiderDownloaderMiddleware': 543,
    'FDC_spider.middlewares.UserAgentMiddleware': 544,
    # 'FDC_spider.middlewares.InnerNetProxyMiddleware': 545,
    'FDC_spider.middlewares.OuterNetProxyMiddleware': 545,  # uses proxies stored in redis; requires RedisConnPipeline to be enabled as well
    # 'FDC_spider.middlewares.NanjingProxyMiddleware': 545,  # proxy for the Nanjing spider; requires RedisConnPipeline as well
    # 'FDC_spider.middlewares.SfpmProxyMiddleware': 545,  # proxy for the judicial-auction (sfpm) spider; requires RedisConnPipeline as well
    'FDC_spider.middlewares.SfpmCookiesMiddleware': 546,  # cookie configuration for the judicial-auction (sfpm) spider
    # 'FDC_spider.middlewares.DongguanOuterNetProxyMiddleware': 545,  # external-network proxy for the Dongguan spider; requires RedisConnPipeline as well
    # 'FDC_spider.middlewares.DongguanInnerNetProxyMiddleware': 545,  # internal-network proxy for the Dongguan spider
    # 'FDC_spider.middlewares.KuaidailiSfpmProxyMiddleware': 545,  # Kuaidaili (commercial proxy provider)
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
EXTENSIONS = {
    # 'scrapy.extensions.telnet.TelnetConsole': None,
    # Custom extension that closes an idle redis spider
    # (behavior controlled by EXT_ENABLED / IDLE_NUM further below).
    # NOTE: 'extentions' matches the project's own module spelling.
    'FDC_spider.extentions.CustomRedisSpiderIdleClosedExtensions': 500,
}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# Priorities 301-340 are per-city storage pipelines; 351-385 are the
# matching CSV-export pipelines. Only uncommented entries are active.
ITEM_PIPELINES = {
    'FDC_spider.pipelines.RedisConnPipeline': 299,  # uses proxies stored in redis; requires one of the XXXProxyMiddleware entries to be enabled as well
    'FDC_spider.pipelines.FdcSpiderPipeline': 300,

    # 'FDC_spider.pipelines.FoshanPipeline': 301,
    # 'FDC_spider.pipelines.XiamenPipeline': 302,
    # 'FDC_spider.pipelines.BeijingPipeline': 303,
    # 'FDC_spider.pipelines.KunmingPipeline': 304,
    # 'FDC_spider.pipelines.WuxiPipeline': 305,
    # 'FDC_spider.pipelines.NanningPipeline': 306,
    # 'FDC_spider.pipelines.DongguanPipeline': 307,
    'FDC_spider.pipelines.ShijiazhuangPipeline': 308,
    'FDC_spider.pipelines.GuiyangPipeline': 309,
    'FDC_spider.pipelines.ZhongshanPipeline': 310,
    'FDC_spider.pipelines.HuzhouPipeline': 311,
    # 'FDC_spider.pipelines.BaodingPipeline': 312,
    # 'FDC_spider.pipelines.ChangzhouPipeline': 313,
    # 'FDC_spider.pipelines.NanjingPipeline': 314,
    # 'FDC_spider.pipelines.ShenzhenPipeline': 315,
    # 'FDC_spider.pipelines.XuzhouPipeline': 316,
    # 'FDC_spider.pipelines.GanzhouPipeline': 317,
    # 'FDC_spider.pipelines.YichangPipeline': 318,
    # 'FDC_spider.pipelines.XiangyangPipeline': 319,
    # 'FDC_spider.pipelines.WuhanPipeline': 320,
    # 'FDC_spider.pipelines.NanchongPipeline': 321,
    # 'FDC_spider.pipelines.QingdaoPipeline': 322,
    # 'FDC_spider.pipelines.ZhuhaiPipeline': 323,
    # 'FDC_spider.pipelines.HuanggangPipeline': 324,
    'FDC_spider.pipelines.ChongqingPipeline': 325,
    # 'FDC_spider.pipelines.LianyungangPipeline': 326,
    'FDC_spider.pipelines.YueyangPipeline': 327,
    'FDC_spider.pipelines.DeyangPipeline': 328,
    'FDC_spider.pipelines.ZhoushanPipeline': 329,
    'FDC_spider.pipelines.GuanganPipeline': 330,
    'FDC_spider.pipelines.ZiyangPipeline': 331,
    'FDC_spider.pipelines.XiaoganPipeline': 332,
    'FDC_spider.pipelines.JingmenPipeline': 333,
    'FDC_spider.pipelines.TongrenPipeline': 334,
    'FDC_spider.pipelines.HuangshiPipeline': 335,
    'FDC_spider.pipelines.ShantouPipeline': 336,
    'FDC_spider.pipelines.JiujiangPipeline': 337,
    'FDC_spider.pipelines.LuoyangPipeline': 338,
    'FDC_spider.pipelines.AnqingPipeline': 339,
    'FDC_spider.pipelines.ChangchunPipeline': 340,

    # 'FDC_spider.pipelines.FoshanCsvPipeline': 351,
    # 'FDC_spider.pipelines.XiamenCsvPipeline': 352,
    # 'FDC_spider.pipelines.BeijingCsvPipeline': 353,
    # 'FDC_spider.pipelines.KunmingCsvPipeline': 354,
    # 'FDC_spider.pipelines.WuxiCsvPipeline': 355,
    # 'FDC_spider.pipelines.NanningCsvPipeline': 356,
    # 'FDC_spider.pipelines.DongguanCsvPipeline': 357,
    # 'FDC_spider.pipelines.ShijiazhuangCsvPipeline': 358,
    # 'FDC_spider.pipelines.GuiyangCsvPipeline': 359,
    # 'FDC_spider.pipelines.ZhongshanCsvPipeline': 360,
    # 'FDC_spider.pipelines.HuzhouCsvPipeline': 361,
    # 'FDC_spider.pipelines.BaodingCsvPipeline': 362,
    # 'FDC_spider.pipelines.ChangzhouCsvPipeline': 363,
    # 'FDC_spider.pipelines.NanjingCsvPipeline': 364,
    # 'FDC_spider.pipelines.ShenzhenCsvPipeline': 365,
    # 'FDC_spider.pipelines.XuzhouCsvPipeline': 366,
    # 'FDC_spider.pipelines.GanzhouCsvPipeline': 367,
    # 'FDC_spider.pipelines.YichangCsvPipeline': 368,
    # 'FDC_spider.pipelines.XiangyangCsvPipeline': 369,
    # 'FDC_spider.pipelines.WuhanCsvPipeline': 370,
    # 'FDC_spider.pipelines.NanchongCsvPipeline': 371,
    # 'FDC_spider.pipelines.QingdaoCsvPipeline': 372,
    # 'FDC_spider.pipelines.ZhuhaiCsvPipeline': 373,
    # 'FDC_spider.pipelines.HuanggangCsvPipeline': 374,
    # 'FDC_spider.pipelines.ChongqingCsvPipeline': 375,
    # 'FDC_spider.pipelines.LianyungangCsvPipeline': 376,
    # 'FDC_spider.pipelines.YueyangCsvPipeline': 377,
    # 'FDC_spider.pipelines.DeyangCsvPipeline': 378,
    # 'FDC_spider.pipelines.ZhoushanCsvPipeline': 379,
    # 'FDC_spider.pipelines.GuanganCsvPipeline': 380,
    # 'FDC_spider.pipelines.ZiyangCsvPipeline': 381,
    # 'FDC_spider.pipelines.XiaoganCsvPipeline': 382,
    # 'FDC_spider.pipelines.JingmenCsvPipeline': 383,
    # 'FDC_spider.pipelines.TongrenCsvPipeline': 384,
    # 'FDC_spider.pipelines.HuangshiCsvPipeline': 385,

    # 'FDC_spider.pipelines.MongoClientPipeline': 350,  # saves items to mongo
    # 'scrapy_redis.pipelines.RedisPipeline': 400,  # pipeline that saves items to redis

    # 'FDC_spider.pipelines.SfpmCasePipeline': 300,
    # 'FDC_spider.pipelines.SfpmCaseImagesPipeline': 301,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# Dynamically adjusts request delays based on server response times.
AUTOTHROTTLE_ENABLED = True
# The initial download delay (seconds)
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'


# Logging.
# Build a date-stamped log path, e.g. "FDC_spider/logs/2024_1_5.log"
# (year/month/day come from the current local time; not zero-padded).
today = datetime.datetime.now()
LOG_PATH = "%s/logs/%s_%s_%s.log" % (BOT_NAME, today.year, today.month, today.day)
# Uncomment LOG_FILE to actually write the log to LOG_PATH.
# LOG_FILE = LOG_PATH
# LOG_LEVEL = "WARNING"
LOG_LEVEL = "DEBUG"

# User-Agent pool, randomly rotated by UserAgentMiddleware
USER_AGENTS = [
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
]

# Proxy IPs (for the internal network behind the bastion host)
# The triple-quoted block below is a disabled, older proxy list kept for reference.
"""
PROXY = ['192.168.1.161:65500', '192.168.1.162:65500', '192.168.1.163:65500', '192.168.1.164:65500',
         '192.168.1.166:65500', '192.168.1.171:65500', '192.168.1.182:65500',
         '192.168.1.184:65500', '192.168.1.185:65500', '192.168.1.186:65500',
         '192.168.1.187:65500', '192.168.1.188:65500', '192.168.1.189:65500', '192.168.1.190:65500',
         '192.168.1.191:65500', '192.168.1.192:65500', '192.168.1.193:65500', '192.168.1.194:65500',
         '192.168.1.195:65500', '192.168.1.196:65500', '192.168.1.197:65500', '192.168.1.198:65500',
         '192.168.1.199:65500', '192.168.1.200:65500']
"""
PROXY = ['192.168.1.162:65500', '192.168.1.164:65500', '192.168.1.166:65500', '192.168.1.169:65500',
         '192.168.1.182:65500', '192.168.1.183:65500', '192.168.1.185:65500', '192.168.1.186:65500',
         '192.168.1.187:65500', '192.168.1.188:65500', '192.168.1.189:65500', '192.168.1.190:65500',
         '192.168.1.191:65500', '192.168.1.192:65500', '192.168.1.193:65500', '192.168.1.194:65500',
         '192.168.1.195:65500', '192.168.1.196:65500', '192.168.1.197:65500', '192.168.1.198:65500',
         '192.168.1.199:65500', '192.168.1.200:65500', ]
# Proxy API endpoint (presumably returns a proxy address; consumed by the
# proxy middlewares — confirm against FDC_spider.middlewares)
PROXY_URL = 'http://183.230.7.247:10888/getip?addr=&tof=f'

# Whether the Retry middleware is enabled (default: True)
# RETRY_ENABLED = False
# Maximum number of retries per request
RETRY_TIMES = 20
# HTTP response status codes that trigger a retry
# (400/404/407/408 are added on top of the usual 5xx codes)
RETRY_HTTP_CODES = [500, 502, 503, 504, 400, 404, 408, 407]

# Download timeout in seconds
DOWNLOAD_TIMEOUT = 30

# MONGODB connection settings.
# Translation of the notes below:
# 1. If the replicaSet 'name' is empty, no replica-set/cluster configuration is used.
# 2. If 'user' and 'pwd' are empty, no authentication is performed.
"""
1.replicaSet的name为空则不使用集群配置
2.user和pwd为空则不需要进行密码校验
"""
MONGODB = {
    'host': '192.168.5.236',
    'port': '27017',
    'user': '',
    'pwd': '',
    'replicaSet': {
        'name': '',
        "members": [
            {
                "host": "localhost",
                "port": "27017"
            },
        ]
    }
}

# redis (option 1: single connection URL)
REDIS_URL = "redis://127.0.0.1:6379/6"
# redis (option 2: separate host/port)
# REDIS_HOST = "127.0.0.1"
# REDIS_PORT = 6379

# Enable the custom idle-close extension (see EXTENSIONS above)
EXT_ENABLED = True
# Allowed idle duration:
# the spider is shut down after IDLE_NUM * 5 seconds of idleness
IDLE_NUM = 120

# ImagesPipeline configuration.
# Directory where downloaded images are stored: an "images" folder
# located next to this settings file.
IMAGES_STORE = os.path.join(os.path.dirname(__file__), 'images')
# Item fields holding image URLs / download results
# (Scrapy defaults are image_urls / images).
IMAGES_URLS_FIELD = 'imageUrls'
IMAGES_RESULT_FIELD = 'imagesInfo'
# Image expiration in days (Scrapy default is 90; raised to 900 here).
IMAGES_EXPIRES = 900
# Filters for skipping images that are too small:
# IMAGES_MIN_HEIGHT = 110
# IMAGES_MIN_WIDTH = 110
# Thumbnail generation: key = sub-folder name, value = (width, height):
# IMAGES_THUMBS = {
#     'small': (50, 50),
#     'big': (270, 270),
# }

"""
司法拍卖
1.【注意】如果SFPM_PROVINCE参数被注释，进行全国爬取;SFPM_PROVINCE为空列表，不进行爬取
2.【注意】如果只设置省份(直辖市)，不设置城市，默认爬取该省份(直辖市)的所有城市
3.【注意】如要设置城市，需先设置(全国爬取时不需要设置)该城市所在 省份(直辖市)，
4.【注意】不要带 省、市 等后缀(如四川省，应设置为四川)
"""
# 设置 省份(直辖市)
SFPM_PROVINCE = ['重庆', '四川']
# 设置 城市
SFPM_CITY = {'重庆': ['重庆', ],
             '四川': ['成都', ],
             }
# 开启redis案例增量去重(默认开启)
SFPM_CASE_REDIS = False
