from airflow import DAG  
from airflow.operators.python import PythonOperator  
from datetime import datetime
import sys
import os
import pendulum

# Defaults applied to every task in this DAG; start_date anchors the
# schedule in Shanghai local time.
default_args = {
    'owner': 'airflow',
    'start_date': pendulum.datetime(2024, 4, 15, tz="Asia/Shanghai"),
}

dag = DAG(
    'kuaixun_crawler',
    default_args=default_args,
    schedule_interval='*/30 * * * *',  # run once every 30 minutes
    catchup=False,  # skip backfilling intervals missed while the DAG was off
)
  
def run_scrapy_crawler(**kwargs):
    """Run the Eskuaixun Scrapy spider to completion in this process.

    Makes the Scrapy project in the parent directory of this DAG file
    importable, loads the project settings, and runs the spider via a
    blocking ``CrawlerProcess``.  Scrapy/project imports are deferred to
    call time so the Airflow scheduler can parse this module without
    having the crawler's dependencies on its own import path.

    Args:
        **kwargs: Airflow task context (unused).
    """
    # Resolve the directory one level above the folder holding this file;
    # that is where the Scrapy project lives.
    current_file_path = os.path.abspath(__file__)
    current_directory = os.path.dirname(current_file_path)
    parent_directory = os.path.dirname(current_directory)
    # Make the project importable; guard against appending a duplicate
    # entry when the task runs more than once in the same interpreter.
    if parent_directory not in sys.path:
        sys.path.append(parent_directory)
    # Point Scrapy's config lookup at the project directory.
    os.environ['XDG_CONFIG_HOME'] = parent_directory

    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings
    from wisdom.crawler.spiders import EskuaixunSpider  # project spider

    settings = get_project_settings()
    settings.set('LOG_ENABLED', False)  # keep Scrapy quiet in task logs

    # NOTE(review): CrawlerProcess starts a Twisted reactor, which cannot be
    # restarted within one interpreter — fine when each task runs in a fresh
    # worker process, but it will fail if tasks share a long-lived process.
    process = CrawlerProcess(settings)
    process.crawl(EskuaixunSpider)
    process.start()  # blocks until the crawl finishes
  
# In Airflow 2.x the task context is always passed to the callable, so the
# legacy ``provide_context`` flag is a deprecated no-op and is omitted here.
run_scrapy_task = PythonOperator(
    task_id='run_scrapy_crawler',
    python_callable=run_scrapy_crawler,
    dag=dag,
)