#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
直接运行Amazon热销商品爬虫的脚本
"""

import sys
import os

# 添加项目路径到Python路径
project_root = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, project_root)

# 添加爬虫项目路径
spider_path = os.path.join(project_root, 'crawProj', 'distributed_weibo_spider')
sys.path.insert(0, spider_path)

def run_spider():
    """Run the Amazon best-sellers spider in-process and block until done.

    Configures Scrapy programmatically: single concurrent request, a long
    randomized download delay, and retries on HTTP 429 plus transient server
    errors, to soften anti-bot pressure. Elasticsearch connection settings
    may be overridden via the ES_HOST / ES_USERNAME / ES_PASSWORD environment
    variables; the legacy hard-coded values remain the defaults so existing
    deployments keep working.

    Returns:
        bool: True if the crawl ran to completion, False if setup or the
        crawl itself raised an exception.
    """
    print("开始运行Amazon热销商品爬虫...")

    try:
        # Import Scrapy lazily so a missing dependency is reported as a
        # normal failure (return False) instead of crashing at import time.
        from scrapy.crawler import CrawlerProcess
        from scrapy.utils.project import get_project_settings

        # Point Scrapy at the project's settings module before loading it.
        os.environ['SCRAPY_SETTINGS_MODULE'] = 'crawProj.distributed_weibo_spider.settings'

        settings = get_project_settings()

        # Crawl gently: one request at a time with a long randomized delay.
        settings.set('LOG_LEVEL', 'INFO')
        settings.set('CONCURRENT_REQUESTS', 1)
        settings.set('CONCURRENT_REQUESTS_PER_DOMAIN', 1)
        settings.set('DOWNLOAD_DELAY', 5)
        settings.set('RANDOMIZE_DOWNLOAD_DELAY', True)
        settings.set('DUPEFILTER_DEBUG', True)
        # Prefer environment variables for credentials so secrets need not
        # live in source; fall back to the original values for compatibility.
        settings.set('ES_HOST', os.environ.get('ES_HOST', 'http://1.94.107.109:9200'))
        settings.set('ES_USERNAME', os.environ.get('ES_USERNAME', 'elastic'))
        settings.set('ES_PASSWORD', os.environ.get('ES_PASSWORD', '07122201hm'))
        # Retry throttling responses (429) and transient server errors.
        settings.set('RETRY_TIMES', 5)
        settings.set('RETRY_HTTP_CODES', [429, 500, 502, 503, 504])

        # Import the spider class only after settings are in place.
        from crawProj.distributed_weibo_spider.spiders.amazon_spider import AmazonBestSellersSpider

        process = CrawlerProcess(settings)
        process.crawl(AmazonBestSellersSpider)

        print("正在启动爬虫...")
        process.start()  # blocks until the crawl finishes

        print("爬虫运行完成")
        return True

    except Exception as e:
        # Boundary handler: report the error and signal failure to the
        # caller instead of dying with an unhandled traceback.
        print(f"运行爬虫时出错: {e}")
        import traceback
        traceback.print_exc()
        return False

# Script entry point: kick off the crawl when executed directly.
if __name__ == '__main__':
    run_spider()