# Scrapy settings for eolcrawl project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import os
from pathlib import Path
import datetime

BOT_NAME = "eolcrawl"

SPIDER_MODULES = ["eolcrawl.spiders"]
NEWSPIDER_MODULE = "eolcrawl.spiders"


# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "eolcrawl (+http://www.yourdomain.com)"

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#    "Accept-Language": "en",
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    "eolcrawl.middlewares.EolcrawlSpiderMiddleware": 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    "scrapy.downloadermiddlewares.retry.RetryMiddleware": 90,
    "scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware": 110,
    "eolcrawl.middlewares.RandomUserAgent": 400,
    "eolcrawl.middlewares.EolcrawlDownloaderMiddleware": 540,
    # BUG FIX: removed "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler"
    # from this dict. It is a download *handler*, not a downloader middleware
    # (it implements none of the process_request/process_response hooks), so
    # listing it here was at best a no-op. It is correctly registered in
    # DOWNLOAD_HANDLERS further down this file.
}

# Enable Playwright
# TWISTED_REACTOR = 'twisted.internet.asyncioreactor.AsyncioSelectorReactor'
# Playwright settings (optional)
PLAYWRIGHT_BROWSER_TYPE = "chromium"  # one of 'chromium', 'firefox' or 'webkit'
PLAYWRIGHT_LAUNCH_OPTIONS = {
    "headless": False,
    "timeout": 60 * 1000,  # 60 seconds
    # "slow_mo": 500,    # slow down operations for debugging
}

# Set the page load timeout
PLAYWRIGHT_PAGE_LOAD_TIMEOUT = 60  # 60 seconds

# Set the script evaluation timeout
PLAYWRIGHT_SCRIPT_TIMEOUT = 60  # 60 seconds

# Set the selector timeout
PLAYWRIGHT_SELECTOR_TIMEOUT = 30  # 30 seconds
###
# BUG FIX: the original had a trailing comma, which made this a one-element
# tuple (60000,) rather than the integer millisecond value expected for a
# Playwright timeout.
PLAYWRIGHT_DEFAULT_NAVIGATION_TIMEOUT = 60 * 1000  # 60 seconds
PLAYWRIGHT_DEFAULT_TIMEOUT = 60 * 1000  # default operation timeout, in milliseconds

# NOTE(review): this assignment is overridden by a second PLAYWRIGHT_CONTEXT_ARGS
# definition further down this file; kept here to preserve module load order.
PLAYWRIGHT_CONTEXT_ARGS = {"ignore_https_errors": True}

# PLAYWRIGHT_MAX_CONTEXTS = 8

# 2. Optimized request-interception rules
def should_abort_request(request):
    """Return True when *request* is a non-essential static asset to abort.

    A request is aborted when its resource type is one of the static-asset
    categories, or when its URL contains one of the known static-file
    extensions anywhere in the string (including query strings).
    """
    blocked_types = ("image", "stylesheet", "font", "media")
    blocked_markers = (".jpg", ".css", ".png", ".gif", ".woff", ".woff2")
    if request.resource_type in blocked_types:
        return True
    url = request.url
    # Substring match on purpose: also catches e.g. "a.jpg?v=2".
    return any(marker in url for marker in blocked_markers)


# Verification pages need images, so request aborting stays disabled for now.
# PLAYWRIGHT_ABORT_REQUEST = should_abort_request


# Route both HTTP and HTTPS downloads through the Playwright handler.
DOWNLOAD_HANDLERS = {
    "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
    "https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# BUG FIX: EXTENSIONS was assigned twice; the second dict silently replaced
# the first, dropping the TelnetConsole entry. The two dicts are merged here.
# (Disabling TelnetConsole is redundant with TELNETCONSOLE_ENABLED = False
# above, but kept for explicitness.)
EXTENSIONS = {
    "scrapy.extensions.telnet.TelnetConsole": None,
    "eolcrawl.extensions.SpiderMonitorExtension": 500,
}


# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
#    "eolcrawl.pipelines.EolcrawlPipeline": 300,
# }

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = "httpcache"
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
# Do not store feeds that produced no items.
FEED_STORE_EMPTY = False

# Indent exported data by 2 levels for more readable JSON/XML output.
FEED_EXPORT_INDENT = 2

# Custom project setting (not a built-in Scrapy name); presumably read by
# spider code to build a per-spider FEEDS configuration — TODO confirm usage.
OUT_FILE = {
    "feeds": {
        "com": {
            "format": "jsonlines",
            "encoding": "utf-8",
            "store_empty": True,
            # 'overwrite':True,
            "item_filter": "eolcrawl.spiders.comspider.MyCustomFilter",
        },
    },
}

# FEED_FORMAT = 'json'  # may be 'json', 'csv', 'xml', etc.
# FEED_EXPORT_ENCODING = "utf-8"
# # FEED_URI = 'issue_detail.json'  # output file path and name
# FEED_URI = "export_data/%(name)s_%(time)s.json"
# LOG_ENABLED = False
# FILES_STORE = 'pdfs/'
MEDIA_ALLOW_REDIRECTS = True

# Start a new output file every 1000 exported items.
FEED_EXPORT_BATCH_ITEM_COUNT = 1000

# PRO_SPIDER_ROOT='/home/jhq/work/code/crawl/eduCrawl'


# Project root directory (assumes this settings file lives in a subdirectory
# directly below the project root).
PRO_SPIDER_ROOT = str(Path(__file__).resolve().parent.parent)
# PRO_SPIDER_ROOT = os.getenv("SPIDER_PRO_ROOT","/mycrawl")

# print(f"PRO_SPIDER_ROOT is {PRO_SPIDER_ROOT}")

# Logging configuration

# Current date, usable for date-stamped log file names (see LOG_FILE below).
current_date = datetime.datetime.now().strftime("%Y-%m-%d")

# Log root directory.
# BUG FIX: the original tested os.getenv("LOG_PATH") but then read
# os.getenv("SPIDER_LOGS_PATH") in the else branch, so LOG_PATH became None
# (and the subsequent os.path.exists(None) raised TypeError) whenever
# LOG_PATH was set. Read one variable consistently, keeping the same
# filesystem default. TODO confirm LOG_PATH (not SPIDER_LOGS_PATH) is the
# intended environment variable name.
LOG_PATH = os.getenv("LOG_PATH")
if LOG_PATH is None:
    LOG_PATH = str(Path(__file__).parent.parent / "logs")

# exist_ok=True avoids the check-then-create race of the original
# os.path.exists()/os.makedirs() pair.
os.makedirs(LOG_PATH, exist_ok=True)

# Scrapy system-log configuration
LOG_ENABLED = True
LOG_LEVEL = "INFO"  # 'CRITICAL', 'ERROR', 'WARNING', 'INFO' or 'DEBUG'
# LOG_FILE = os.path.join(LOG_PATH, f"scrapy_system_{current_date}.log")
LOG_STDOUT = False
LOG_SHORT_NAMES = True

# Keep the default logging configuration enabled.
# LOG_ENABLED = False

# FilesPipeline storage root; files are stored under the project root.
FILES_STORE = PRO_SPIDER_ROOT
ITEM_PIPELINES = {
    # Runs first — presumably downloads item attachments (name-based; confirm
    # against eolcrawl/pipelines.py).
    "eolcrawl.pipelines.AttachmentsPipeline": 1,
    # "eolcrawl.pipelines.JsonWithEncodingPipeline":100
    # Presumably deduplicates items against a SQLite store — TODO confirm.
    "eolcrawl.pipelines.SqliteNoDuplicatesPipeline": 200,
}

# DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.DisableSSLVerificationContextFactory'


## Send no referrer information to target pages.
REFERRER_POLICY = "no-referrer"
# Limit the number of concurrent requests
# CONCURRENT_REQUESTS = 16
# Delay between requests (seconds)
# DOWNLOAD_DELAY = 0.5
# Randomize the download delay
# RANDOMIZE_DOWNLOAD_DELAY = True
# Enable the retry mechanism
RETRY_ENABLED = True
RETRY_TIMES = 3
DOWNLOAD_TIMEOUT = 120  # increased download timeout
# NOTE(review): retrying 404 responses is unusual (a 404 is normally final);
# confirm it is intentional before keeping it in this list.
RETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408, 429, 404]

## Give Playwright full control: headers from Scrapy requests are ignored
## (whether set via the Request.headers attribute or by Scrapy components,
## including via the Request.cookies attribute).
PLAYWRIGHT_PROCESS_REQUEST_HEADERS = None


## 4. Browser context settings (this assignment overrides the earlier
## PLAYWRIGHT_CONTEXT_ARGS defined above in this file).
PLAYWRIGHT_CONTEXT_ARGS = {
    "ignore_https_errors": True,
    "java_script_enabled": True,
    "viewport": {"width": 1920, "height": 1080},
    "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    # BUG FIX: Playwright's Python API takes snake_case keyword arguments;
    # the original camelCase "extraHTTPHeaders" would raise TypeError when
    # passed to Browser.new_context(). The correct kwarg is extra_http_headers.
    "extra_http_headers": {"Referer": ""},
}

## 5. Limit the number of concurrently open browser contexts.
PLAYWRIGHT_MAX_CONTEXTS = 8

#
# async def custom_headers(
#     *,
#     browser_type_name: str,
#     playwright_request: playwright.async_api.Request,
#     scrapy_request_data: dict,
# ) -> Dict[str, str]:
#     headers = await playwright_request.all_headers()
#     scrapy_headers = scrapy_request_data["headers"].to_unicode_dict()
#     headers["Cookie"] = scrapy_headers.get("Cookie")
#     return headers
#
# PLAYWRIGHT_PROCESS_REQUEST_HEADERS = custom_headers
# DOWNLOADER_CLIENTCONTEXTFACTORY = "eolcrawl.custom_context.CustomContextFactory"

# TLS client settings for Scrapy's default (non-Playwright) downloader.
DOWNLOADER_CLIENT_TLS_METHOD = "TLS"
DOWNLOADER_CLIENT_TLS_CIPHERS = "DEFAULT"
# NOTE(review): disables TLS certificate verification — useful for crawling
# hosts with broken certificates, but a deliberate security trade-off;
# confirm it is intended.
DOWNLOADER_CLIENT_TLS_VERIFY = False

# AutoThrottle dynamically adjusts the download delay based on server load;
# tuning knobs are the commented AUTOTHROTTLE_* options earlier in this file.
AUTOTHROTTLE_ENABLED = True