# -*- coding: utf-8 -*-

import os
from pathlib import Path

# Scrapy settings for fire_control_spider project

BOT_NAME = 'fire_control_spider'

# Spider discovery: both point at the beijingfire site package, so only
# spiders defined in that package are registered / generated.
SPIDER_MODULES = ['fire_control_spider.sites.beijingfire']
NEWSPIDER_MODULE = 'fire_control_spider.sites.beijingfire'

# Obey robots.txt rules (deliberately disabled for this crawler)
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 20

# Configure a delay for requests for the same website (default: 0)
DOWNLOAD_DELAY = 0.05
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 10
CONCURRENT_REQUESTS_PER_IP = 32

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# NOTE(review): a fixed Chrome User-Agent is pinned here, while a
# UserAgentMiddleware is also registered below — presumably the middleware
# rotates/overrides it per request; confirm against middlewares.py.
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
  'Accept-Encoding': 'gzip, deflate',
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}

# Enable or disable spider middlewares
SPIDER_MIDDLEWARES = {
    'fire_control_spider.middlewares.SpiderMiddleware': 543,
}

# Enable or disable downloader middlewares
# (requests pass through these in ascending order of the value)
DOWNLOADER_MIDDLEWARES = {
    'fire_control_spider.middlewares.ProxyMiddleware': 350,
    'fire_control_spider.middlewares.UserAgentMiddleware': 400,
    'fire_control_spider.middlewares.RetryMiddleware': 500,
}

# Configure item pipelines (items flow through these in ascending order:
# validate first, then deduplicate, then download media, then persist)
ITEM_PIPELINES = {
    'fire_control_spider.pipelines.ValidationPipeline': 100,
    'fire_control_spider.pipelines.DuplicatesPipeline': 200,
    'fire_control_spider.pipelines.async_download.AsyncMediaDownloadPipeline': 250,  # async download pipeline
    'fire_control_spider.pipelines.JsonlWriterPipeline': 400,
    # 'fire_control_spider.pipelines.MediaDownloadPipeline': 300,  # default synchronous download pipeline, disabled
}

# Enable and configure the AutoThrottle extension (disabled by default)
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 1
AUTOTHROTTLE_MAX_DELAY = 3
AUTOTHROTTLE_TARGET_CONCURRENCY = 12.0
AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default).
# Responses with the listed status codes (server errors, 403/404,
# timeouts and throttling) are never written to the cache.
HTTPCACHE_ENABLED = True
HTTPCACHE_EXPIRATION_SECS = 3600  # cached responses expire after one hour
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_HTTP_CODES = [503, 504, 505, 500, 403, 404, 408, 429]

# Output directory and file-rotation limits (consumed by the pipelines)
OUTPUT_DIR = Path("outputs")
JSONL_MAX_SIZE = 1024 * 1024 * 1024  # rotate JSONL files at 1 GB
JSONL_MAX_RECORDS = 50000  # ...or at 50k records per file
IMAGE_MAX_COUNT = 10000  # max 10k images per image directory
CSV_MAX_RECORDS = 100000  # max 100k rows per CSV file

# Retry configuration
RETRY_TIMES = 3
RETRY_HTTP_CODES = [500, 502, 503, 504, 408, 429]

# NOTE(review): the `DOWNLOAD_TIMEOUT = 180` that used to live here was dead
# code — it was unconditionally overwritten by `DOWNLOAD_TIMEOUT = 600` at
# the bottom of this file (last assignment wins), so it has been removed.
# The effective timeout (600 s) is unchanged.

# Completely silence logging: drop every known chatty logger to CRITICAL
# and turn off Scrapy's own log output entirely.
import logging

# Leading underscore keeps these names out of Scrapy's settings namespace
# (only uppercase attributes are read as settings).
_SILENCED_LOGGERS = (
    'scrapy',
    'scrapy.core',
    'scrapy.core.engine',
    'scrapy.core.scraper',
    'scrapy.statscollectors',
    'scrapy.downloadermiddlewares',
    'scrapy.spidermiddlewares',
    'scrapy.extensions',
    'chardet',
    'fire_control_spider',
)
for _logger_name in _SILENCED_LOGGERS:
    logging.getLogger(_logger_name).setLevel(logging.CRITICAL)

LOG_LEVEL = 'CRITICAL'
LOG_ENABLED = False



# Extensions.
# BUG FIX(review): the original file assigned EXTENSIONS twice; the second
# assignment silently replaced the first, so the built-in extensions
# (telnet, log stats, core stats, memory usage, ...) were never actually
# disabled, and StatsExtension had conflicting orders (None vs 500).
# Merged into a single dict: built-ins disabled (None), the project's
# StatsExtension enabled at order 500 — matching the last-written intent.
EXTENSIONS = {
    'scrapy.extensions.telnet.TelnetConsole': None,
    'scrapy.extensions.logstats.LogStats': None,
    'scrapy.extensions.corestats.CoreStats': None,
    'scrapy.extensions.memusage.MemoryUsage': None,
    'scrapy.extensions.memdebug.MemoryDebugger': None,
    'scrapy.extensions.closespider.CloseSpider': None,
    'scrapy.extensions.feedexport.FeedExporter': None,
    'fire_control_spider.extensions.StatsExtension': 500,
}

# Proxy configuration (off by default; presumably consumed by the
# ProxyMiddleware registered above — confirm against middlewares.py)
PROXY_ENABLED = False
PROXY_LIST = []

# Deduplication: use Scrapy's default request-fingerprint dupefilter
DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'

# File storage configuration: both media stores share the spider's
# output directory.
FILES_STORE = str(OUTPUT_DIR)
IMAGES_STORE = str(OUTPUT_DIR) 

# Download timeouts (seconds). This DOWNLOAD_TIMEOUT is the last assignment
# in the module and therefore the effective value; it overrides the earlier
# 180-second setting above. FILE_DOWNLOAD_TIMEOUT is a custom setting —
# presumably read by the async media download pipeline; confirm.
DOWNLOAD_TIMEOUT = 600
FILE_DOWNLOAD_TIMEOUT = 1200
