import sys
from datetime import datetime
import csv
import argparse  # standard-library CLI argument parsing

from WebScraper.WebScraper import WebScraper
from util.city_code_finder import CityCodeFinder  # maps city names to site city codes
from storage.storage_factory import StorageFactory  # provides the persistence backend



def main(city_code, query, default_page=1, max_pages=30):
    """Scrape job listings page by page and persist them to JSON and CSV.

    Args:
        city_code: Site-specific city identifier used by the scraper.
        query: Job search keyword.
        default_page: First page number to fetch (1-based).
        max_pages: How many consecutive pages to fetch.
    """
    collected = []
    for page_no in range(default_page, default_page + max_pages):
        page_data = WebScraper(city_code, query, page_no).fetch_job_data()
        # Skip pages that returned nothing (empty/None result).
        if page_data:
            collected.extend(page_data)

    # Obtain the configured storage backend from the factory.
    storage = StorageFactory().get_storage()

    # Timestamp the filenames so repeated runs never overwrite each other.
    stamp = datetime.now().strftime('%Y%m%d%H%M%S')
    json_filename = f"job-{city_code}-{query}-{stamp}.json"
    csv_filename = f"job-{city_code}-{query}-{stamp}.csv"

    # Persist the accumulated data in both formats.
    storage.save_json(collected, json_filename)
    storage.save_csv(collected, csv_filename)


if __name__ == "__main__":
    # Command-line interface: city + keyword are required, paging is optional.
    arg_parser = argparse.ArgumentParser(description="Boss直聘数据采集")
    arg_parser.add_argument('--city', type=str, required=True, help='City name')
    arg_parser.add_argument('--query', type=str, required=True, help='Job query keyword')
    arg_parser.add_argument('--default-page', type=int, default=1, help='Default page number')
    arg_parser.add_argument('--max-pages', type=int, default=5, help='Maximum number of pages to scrape')
    cli_args = arg_parser.parse_args()

    # Resolve the human-readable city name to the site's internal city code.
    finder = CityCodeFinder("json/cn_weather_citylist_compressed.json")
    resolved_code = finder.get_city_code(cli_args.city)

    # Abort with a non-zero exit status when the city is unknown.
    if not resolved_code:
        print(f"未找到: {cli_args.city} 城市代码")
        sys.exit(1)

    main(resolved_code, cli_args.query, cli_args.default_page, cli_args.max_pages)