import requests
import pandas as pd
import time
from requests.exceptions import RequestException

print("2207124003 许熔婷 220706 大数据管理与应用")

# Global crawl configuration (assumes the listing endpoint and params are correct).
BASE_URL = "https://www.5iai.com/api/enterprise/job/public/es"  # job-listing API endpoint
PAGE_SIZE = 10  # jobs requested per listing page
MAX_PAGES = 21  # total number of listing pages to crawl
SAVE_INTERVAL = 5  # seconds to pause after each page (runs in the loop's `finally`)
DETAIL_SLEEP = 2  # seconds to pause between per-job detail requests
OUTPUT_FILE = "information.csv"

# Browser-like request headers, copied from a captured Chrome request so the
# API treats the crawler like a normal browser client.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
    'Accept': 'application/json, text/plain, */*',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    'Referer': 'https://www.5iai.com/jobList',
    'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"'
}

# Truncate/create the output CSV so every run starts from an empty file
# (pages are later appended with mode='a').
with open(OUTPUT_FILE, 'w', encoding='utf-8-sig') as f:
    f.write('')


def safe_extract(data, keys, default=''):
    """Walk *keys* down a nested dict structure.

    Returns the value found at the end of the key path, or *default* as
    soon as any step is not a dict or lacks the key. With an empty key
    path the input itself is returned unchanged.
    """
    current = data
    for step in keys:
        if not isinstance(current, dict) or step not in current:
            return default
        current = current[step]
    return current


# 主爬取逻辑
# Main crawl loop: fetch each listing page, flatten the nested company
# fields, fetch the per-job description text, and append the page to the CSV.
# NOTE(review): the per-job "detail" request below reuses the listing
# endpoint with an `id` param — confirm this is the real detail API.
#
# Fix: the header used to be gated on `page_number == 1`; if page 1 failed
# or came back empty, every later page was appended WITHOUT a header,
# yielding a malformed CSV. An explicit flag ties the header to the first
# page that actually saves.
header_written = False
for page_number in range(1, MAX_PAGES + 1):
    try:
        print(f"\n正在爬取第 {page_number} 页数据（共 {MAX_PAGES} 页）")
        params = {
            'page': page_number,
            'size': PAGE_SIZE
        }
        response = requests.get(BASE_URL, headers=headers, params=params, timeout=10)
        response.raise_for_status()

        json_data = response.json()
        # An empty payload usually means we have run past the last page.
        if not json_data.get('data') or not json_data['data'].get('content'):
            print("警告：当前页数据为空，可能已到达最后一页")
            continue

        df = pd.DataFrame(json_data['data']['content'])

        # Flatten the nested company objects into top-level columns.
        # The company metadata lives under `enterpriseExtInfo`
        # (not `enterpriseInfo`).
        df['detailedAddress'] = df['enterpriseAddress'].apply(lambda x: safe_extract(x, ['detailedAddress']))
        for col in ('shortName', 'industry', 'personnelScope', 'econKind'):
            # `key=col` binds the loop variable now, avoiding the
            # late-binding-closure pitfall.
            df[col] = df['enterpriseExtInfo'].apply(lambda x, key=col: safe_extract(x, [key]))

        colNames = [
            'positionName', 'minimumWage', 'maximumWage', 'exp',
            'educationalRequirements', 'detailedAddress',
            'shortName', 'industry', 'personnelScope', 'econKind', 'id'
        ]
        df = df[colNames].copy()

        # Fetch the free-text job description for every job on this page.
        job_descriptions = []
        for job_id in df['id']:
            try:
                print(f"  正在获取职位 {job_id} 详情", end='')
                sub_params = {'id': job_id}
                sub_response = requests.get(BASE_URL, headers=headers, params=sub_params, timeout=10)
                sub_response.raise_for_status()

                sub_json = sub_response.json()
                job_descriptions.append(safe_extract(sub_json, ['data', 'jobRequirements']))
                print(" ✔️")
            except RequestException as e:
                # Record a truncated error note instead of skipping, so the
                # list stays aligned with the DataFrame rows.
                job_descriptions.append(f"请求失败：{str(e)[:50]}")
                print(" ❌")
            time.sleep(DETAIL_SLEEP)  # be polite to the server

        df['jobDes'] = job_descriptions
        df.to_csv(OUTPUT_FILE, mode='a', index=False, header=not header_written, encoding='utf-8-sig')
        header_written = True
        print(f"  第 {page_number} 页数据保存成功，包含 {len(df)} 条记录")

    except RequestException as e:
        print(f"\n❌ 第 {page_number} 页爬取失败：{str(e)}")
        continue
    except Exception as e:
        print(f"\n⚠️ 第 {page_number} 页处理异常：{str(e)}")
        continue
    finally:
        # Throttle between pages regardless of success or failure.
        time.sleep(SAVE_INTERVAL)

print("\n🎉 所有页面爬取完成！数据已保存至", OUTPUT_FILE)