# run_spider.py

import subprocess
import sys
import os

# Config: root directory of the Scrapy project (must contain scrapy.cfg)
#PROJECT_DIR = r"E:\StockFile\HuangSpider\myproject"  # previous project path, kept for reference
PROJECT_DIR = r"D:\tick_data\Projects\爬评论\my-crawler"  # change this to your project path

SPIDER_NAME = "guba_run"  # the spider's `name` attribute


def run_scrapy_crawl():
    """
    Run ``scrapy crawl <SPIDER_NAME> --nolog`` for the configured project.

    Changes the working directory to ``PROJECT_DIR`` (Scrapy locates the
    project via ``scrapy.cfg`` in the current directory), then launches the
    spider as a subprocess using the current interpreter. All failures are
    reported via ``print``; nothing is raised to the caller.
    """
    # isdir (not exists): a stray regular *file* at this path must also be
    # rejected, since os.chdir would fail on it anyway.
    if not os.path.isdir(PROJECT_DIR):
        print(f"❌ 项目目录不存在: {PROJECT_DIR}")
        return

    os.chdir(PROJECT_DIR)
    print(f"📁 切换到项目目录: {os.getcwd()}")

    # Launch Scrapy with "python -m scrapy" via sys.executable so the crawl
    # runs in the same interpreter/virtualenv as this script.
    cmd = [
        sys.executable, "-m", "scrapy", "crawl", SPIDER_NAME,
        "--nolog",  # suppress Scrapy's log output
    ]

    print(f"🚀 正在启动爬虫: {' '.join(cmd)}")

    try:
        # check=True converts a non-zero exit code into CalledProcessError.
        # (No encoding= needed: output is not captured, it goes straight
        # to this process's stdout/stderr.)
        subprocess.run(cmd, check=True)
    except subprocess.CalledProcessError as e:
        print(f"❌ 爬虫运行失败，返回码: {e.returncode}")
    except FileNotFoundError:
        print("❌ 未找到 scrapy 命令，请确认已安装 Scrapy 且在正确环境中")
    except Exception as e:
        # Top-level script boundary: report and return rather than crash.
        print(f"❌ 发生未知错误: {e}")


# Script entry point: run the crawl only when executed directly,
# not when this file is imported as a module.
if __name__ == "__main__":
    run_scrapy_crawl()