import os
import time
from urllib.parse import urlsplit

import requests

# Folder where downloaded poster images are stored; created up front so
# download_image() can write into it without re-checking on every call.
image_folder = "jobfair_images"
os.makedirs(image_folder, exist_ok=True)  # idempotent: no error if it already exists

# Job-fair search endpoint (expects a POST with a JSON payload).
base_url = "https://cgate.zhaopin.com/campussxhcv2/pcJobFair/searchJobFair?x-zp-client-id=104c6138-fbc1-4855-b735-369ae5761d3f"

# Shared request headers: browser-like User-Agent to avoid trivial bot blocking.
headers_obj = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36"
}

def download_image(image_url, job_fair_name, page_num, item_num):
    """Download one job-fair poster image and save it under ``image_folder``.

    Args:
        image_url: Absolute or protocol-relative (``//host/...``) image URL.
            Falsy values are rejected immediately without a network call.
        job_fair_name: Human-readable fair name; sanitized into the file name.
        page_num: 1-based page index, used to keep file names unique.
        item_num: 1-based item index within the page, same purpose.

    Returns:
        True if the image was fetched and written to disk, False otherwise.
    """
    if not image_url:
        return False

    try:
        # Normalize protocol-relative URLs ("//host/img.jpg") to https.
        if image_url.startswith('//'):
            image_url = 'https:' + image_url

        # Browser-like headers; timeout prevents a hung connection.
        response = requests.get(image_url, headers=headers_obj, timeout=10)
        response.raise_for_status()

        # Keep only filesystem-safe characters from the fair name.
        safe_name = "".join([c for c in job_fair_name if c.isalpha() or c.isdigit() or c in '._- '])

        # Take the extension from the URL *path* only, so a query string
        # like "...jpg?token=abc" cannot leak into the file name.
        file_ext = os.path.splitext(urlsplit(image_url).path)[1]
        if not file_ext:
            file_ext = '.jpg'  # sensible default when the URL has no extension

        # Prefix with page and item numbers so names never collide.
        file_path = os.path.join(image_folder, f"page{page_num}_{item_num}_{safe_name}{file_ext}")

        with open(file_path, 'wb') as f:
            f.write(response.content)

        print(f"   图片已保存: {os.path.basename(file_path)}")
        return True

    except Exception as e:
        # Best-effort download: report the failure and let the caller continue.
        print(f"   图片下载失败: {str(e)}")
        return False

def crawl_page(page_index, page_size=40):
    """Fetch one page of job-fair listings, print them, and grab posters.

    Args:
        page_index: 1-based page number to request.
        page_size: Items requested per page (default 40); also used to
            derive the total page count from the server's ``totalCount``.

    Returns:
        A ``(success, total_pages)`` tuple. ``success`` is True only when
        the page contained at least one listing; ``total_pages`` is the
        page count implied by ``totalCount`` (0 when it was unavailable).
    """
    # Request payload; pageIndex selects the page, the remaining fields
    # mirror what the campus site itself sends.
    jsons_obj = {
        "jobFairName": "",
        "pageIndex": page_index,
        "pageSize": page_size,
        "regionIds": [],
        "channel": "xiaoyuan",
        "platform": "13",
        "v": "0.61040606",
        "version": "0.0.0"
    }

    try:
        # timeout keeps the crawl from hanging forever on a dead socket.
        response = requests.post(base_url, headers=headers_obj, json=jsons_obj, timeout=15)
        print(f"\n===== 正在爬取第 {page_index} 页，状态码: {response.status_code} =====")

        # Raise for 4xx/5xx so the HTTPError handler below reports it.
        response.raise_for_status()

        json_data = response.json()

        # Expected shape: {"data": {"result": {"totalCount": N, "list": [...]}}}
        if "data" in json_data and "result" in json_data["data"]:
            result = json_data["data"]["result"]
            total_count = result.get("totalCount", 0)
            # Ceiling division: pages needed to hold total_count items.
            total_pages = (total_count + page_size - 1) // page_size

            if "list" in result and result["list"]:
                list_datas = result["list"]

                for idx, list_obj in enumerate(list_datas, 1):
                    try:
                        jobFairName = list_obj.get("jobFairName", "未知招聘会名称")
                        companyNum = list_obj.get("companyNum", "未知企业数量")
                        studentNum = list_obj.get("studentNum", "未知学生数量")

                        print(f"\n{idx}. 招聘会名称: {jobFairName}")
                        print(f"   企业数量: {companyNum}")
                        print(f"   学生数量: {studentNum}")

                        # The poster field name varies; try the known candidates.
                        image_url = list_obj.get("poster") or list_obj.get("logo") or list_obj.get("imageUrl")

                        if image_url:
                            print(f"   图片URL: {image_url[:50]}...")  # truncated for readability
                            download_image(image_url, jobFairName, page_index, idx)
                        else:
                            print("   未找到图片URL")

                        # Small pause between items to avoid hammering the server.
                        time.sleep(0.5)

                    except Exception as e:
                        # One bad item must not abort the rest of the page.
                        print(f"   处理第{idx}条数据时出错: {str(e)}")

                return True, total_pages
            else:
                print("该页没有数据")
                return False, total_pages
        else:
            print("数据结构不符合预期")
            return False, 0

    except requests.exceptions.HTTPError as e:
        print(f"HTTP请求错误: {e}")
        return False, 0
    except requests.exceptions.JSONDecodeError:
        print("无法解析JSON响应")
        return False, 0
    except Exception as e:
        print(f"发生错误: {e}")
        return False, 0

def main(start_page=1, max_pages=None):
    """Crawl job-fair pages sequentially starting at ``start_page``.

    Args:
        start_page: 1-based page number to begin with.
        max_pages: Optional cap on the highest page to crawl; ``None``
            means crawl every page the server reports.
    """
    current_page = start_page
    total_pages = None
    pages_crawled = 0  # count only pages that actually returned data

    while True:
        success, total = crawl_page(current_page)
        if success:
            pages_crawled += 1

        # Lock in the total page count the first time the server reports it.
        if total_pages is None and total > 0:
            total_pages = total
            print(f"\n发现总共有 {total_pages} 页数据")

            # Respect the caller-imposed cap, if any.
            if max_pages and max_pages < total_pages:
                total_pages = max_pages
                print(f"将爬取前 {total_pages} 页数据")

        # Stop on failure or once the last wanted page has been fetched.
        if not success or (total_pages and current_page >= total_pages):
            break

        # Be polite between page requests.
        print(f"\n等待 {2} 秒后爬取下一页...")
        time.sleep(2)
        current_page += 1

    # Report pages that actually yielded data (the old arithmetic counted
    # a failed page as crawled, e.g. "1 page" when the first request failed).
    print(f"\n===== 爬取完成，共爬取 {pages_crawled} 页数据 =====")

if __name__ == "__main__":
    # Entry point. A start page and a page cap may be given,
    # e.g. main(start_page=1, max_pages=5).
    main(start_page=1)  # Start from page 1 and crawl all pages.