# boos6.py
import argparse
import csv
import json
import os
import sys
import time
import urllib
import urllib.parse  # urlencode is used in main(); must be imported explicitly
from datetime import datetime

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager

from storage.storage_factory import StorageFactory
from util.city_code_finder import CityCodeFinder
def _fetch_page_joblist(city_code, query, page):
    """Fetch one result page of zhipin.com job listings via an in-browser XHR.

    Spins up a fresh Chrome instance, loads the search page (so the session
    cookies the XHR endpoint expects are present), then issues a synchronous
    XMLHttpRequest from the page context and parses the JSON response.

    Args:
        city_code: zhipin numeric city code (str or int).
        query: job search keyword.
        page: 1-based result page number.

    Returns:
        The list of job dicts from ``zpData.jobList`` on success, or ``None``
        when the endpoint reports a non-zero ``code`` (typically anti-crawl
        throttling), signalling the caller to retry.
    """
    chrome_options = Options()
    chrome_options.add_argument("--window-size=1920x1080")
    service = Service(ChromeDriverManager().install())
    driver = webdriver.Chrome(service=service, options=chrome_options)
    try:
        driver.get(f"https://www.zhipin.com/web/geek/job?query={query}&city={city_code}&page={page}")

        # Give the page time to load and establish session cookies.
        time.sleep(20)

        xhr_url = "https://www.zhipin.com/wapi/zpgeek/search/joblist.json"
        params = {
            "scene": "1",
            "query": query,
            "city": city_code,
            "experience": "",
            "payType": "",
            "partTime": "",
            "degree": "",
            "industry": "",
            "scale": "",
            "stage": "",
            "position": "",
            "jobType": "",
            "salary": "",
            "multiBusinessDistrict": "",
            "multiSubway": "",
            "page": str(page),
            "pageSize": "30"
        }

        # Issue the request from inside the browser so it carries the
        # session's cookies and passes the site's anti-crawl checks.
        response = driver.execute_script(f"""
            var xhr = new XMLHttpRequest();
            xhr.open('GET', '{xhr_url}?{urllib.parse.urlencode(params)}', false);
            xhr.setRequestHeader('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36');
            xhr.setRequestHeader('Referer', 'https://www.zhipin.com/web/geek/job?query={query}&city={city_code}&page=1');
            xhr.send(null);
            return xhr.responseText;
        """)
        data = json.loads(response)
        print(data)

        if data.get('code') == 0:
            return data['zpData']['jobList']
        return None
    finally:
        # Always release the browser — previously the driver leaked whenever
        # execute_script or json.loads raised an exception.
        driver.quit()


def main(city_code, query, default_page=1, max_pages=30):
    """Scrape up to ``max_pages`` pages of job listings and persist them.

    Results from all pages are accumulated and written once, as both a JSON
    and a CSV file, through the storage backend from ``StorageFactory``.

    Args:
        city_code: zhipin numeric city code (str or int).
        query: job search keyword.
        default_page: first page to fetch (1-based).
        max_pages: maximum number of consecutive pages to fetch.
    """
    max_retries = 5    # per-page retry budget
    all_job_data = []  # accumulates jobList entries across all pages

    # Generate the timestamp once, up front: both output files share the same
    # name stem, and the name is defined even if the page loop never runs
    # (previously max_pages <= 0 caused a NameError below).
    timestamp = datetime.now().strftime('%Y%m%d%H%M%S')

    for page in range(default_page, default_page + max_pages):
        # Reset the retry counter for every page; previously failures on one
        # page silently reduced the retry budget of all subsequent pages.
        retries = 0
        page_ok = False
        while retries < max_retries:
            job_list = _fetch_page_joblist(city_code, query, page)
            if job_list is not None:
                all_job_data.extend(job_list)
                page_ok = True
                break
            print(f"Code is not 0, retrying... ({retries + 1}/{max_retries})")
            retries += 1
            time.sleep(5)  # back off before retrying

        if not page_ok:
            print(f"达到最大重试次数 {max_retries}，程序退出。")
            break  # abandon pagination; still persist whatever was collected

    # Persist everything collected so far through the configured backend.
    factory = StorageFactory()
    storage = factory.get_storage()

    json_filename = f"job-{city_code}-{query}-{timestamp}.json"
    csv_filename = f"job-{city_code}-{query}-{timestamp}.csv"

    storage.save_json(all_job_data, json_filename)
    storage.save_csv(all_job_data, csv_filename)


if __name__ == "__main__":
    # CLI entry point: resolve the city name to a code, then scrape.
    parser = argparse.ArgumentParser(description="Boss直聘数据采集")
    parser.add_argument('--city', type=str, required=True, help='City name')
    parser.add_argument('--query', type=str, required=True, help='Job query keyword')
    parser.add_argument('--default-page', type=int, default=1, help='Default page number')
    parser.add_argument('--max-pages', type=int, default=5, help='Maximum number of pages to scrape')
    args = parser.parse_args()

    # Resolve the city-list file relative to this script's own directory so
    # the lookup works regardless of the current working directory
    # (a bare "../json/..." broke when launched from anywhere else).
    json_file_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "..", "json", "cn_weather_citylist_compressed.json"
    )
    city_finder = CityCodeFinder(json_file_path)

    # Map the human-readable city name to zhipin's numeric city code.
    city_code = city_finder.get_city_code(args.city)
    if not city_code:
        print(f"未找到: {args.city} 城市代码")
        sys.exit(1)  # unknown city: nothing to scrape

    main(city_code, args.query, args.default_page, args.max_pages)