import os
import time
import pandas as pd
import schedule

from excel_link.read_excel import get_url_list
from playwrite.my_playwrite_auto import my_play_write_automation
from tools.mydatetime_utils import get_current_time, get_today_date
from tools.path_utils import my_path_join, project_root_path


def scrapy_main():
    """Crawl app metadata for every URL in the Excel-provided list and save it.

    For each URL: navigate with the Playwright wrapper, read the app name,
    open the "About" panel, and read version/update-date. Successful records
    are collected and written to ``output/crawl_<today>.xlsx``.

    Side effects: starts a browser session, creates the output folder if
    missing, and writes an Excel file.
    """
    output_folder = my_path_join(project_root_path, "output")
    # Ensure the output directory exists; to_excel does not create parents.
    os.makedirs(output_folder, exist_ok=True)

    # Step 1: obtain the list of URLs to crawl.
    url_list = get_url_list()

    # Step 2: start the browser session.
    my_play_write_automation.start_browser()

    # Capture today's date once, before the loop. The original assigned it
    # inside the loop body, which raised NameError at save time whenever
    # url_list was empty or every navigation failed.
    current_date = get_today_date()

    # One record per successfully crawled URL.
    data = []

    for url in url_list:
        is_success = my_play_write_automation.navigate_to_site(url)
        if not is_success:
            # Skip URLs the browser could not reach.
            continue

        # Read the app name from the landing page.
        app_name = my_play_write_automation.get_app_name()

        # Open the "About" panel that exposes version information.
        my_play_write_automation.click_about_button()

        # Read the version string and its last-update date.
        version, update_date = my_play_write_automation.get_version_and_date_time()

        data.append({
            "url": url,
            "app_name": app_name,
            "version": version,
            "update_date": update_date,
            # Timestamp of this crawl, taken per record.
            "crawl_date_time": get_current_time(),
        })

    # Persist everything (possibly an empty sheet) to a dated Excel file.
    df = pd.DataFrame(data)
    output_file_path = os.path.join(output_folder, f"crawl_{current_date}.xlsx")
    df.to_excel(output_file_path, index=False)

    print(f"数据已成功保存到 {output_file_path}")

#
# def schedule_scrapy():
#     # Schedule the crawl to run once every two days.
#     schedule.every(2).days.do(scrapy_main)
#
#     # Loop forever, running any pending scheduled jobs.
#     while True:
#         schedule.run_pending()
#         time.sleep(60)  # Check once per minute whether a job is due.
#
# # Start the scheduler.
# schedule_scrapy()

scrapy_main()