import requests
import pandas as pd
from bs4 import BeautifulSoup
import time
import random

def _parse_job_item(item):
    """Extract one job posting's fields from a `li.job-card-wrapper` node.

    Parameters
    ----------
    item : bs4.Tag
        One job-card list element from the search-results page.

    Returns
    -------
    dict
        Keys are the Chinese column headers used in the output spreadsheet;
        any field missing from the markup falls back to an empty string.
    """
    # Detail-page link (the site uses relative hrefs, so prefix the domain).
    link_tag = item.find('a', class_='job-info-box')
    job_href = 'https://www.zhipin.com' + link_tag['href'] if link_tag else ''

    # Job title.
    name_tag = item.find('span', class_='job-name')
    job_name = name_tag.text.strip() if name_tag else ''

    # Salary.
    salary_tag = item.find('span', class_='job-salary')
    job_salary = salary_tag.text.strip() if salary_tag else ''

    # The tag list holds location / experience / education / welfare in order.
    # NOTE(review): index 1 (presumably experience) is deliberately skipped to
    # keep the original output columns — confirm against the live page layout.
    tag_list = item.find('ul', class_='tag-list')
    tags = [tag.text.strip() for tag in tag_list.find_all('li')] if tag_list else []
    location = tags[0] if len(tags) > 0 else ''
    education = tags[2] if len(tags) > 2 else ''
    welfare = tags[3] if len(tags) > 3 else ''

    # Company name lives inside the company-info anchor.
    company_name = ''
    company_info = item.find('a', class_='company-info')
    if company_info:
        name_span = company_info.find('span', class_='company-name')
        company_name = name_span.text.strip() if name_span else ''

    # Company tags joined with the Chinese enumeration mark, as before.
    company_tag_list = item.find('ul', class_='company-tag-list')
    company_tags = '、'.join(
        li.text.strip() for li in company_tag_list.find_all('li')
    ) if company_tag_list else ''

    return {
        '职位名称': job_name,
        '薪资': job_salary,
        '工作地点': location,
        '学历': education,
        '职位福利': welfare,
        '公司名称': company_name,
        '公司标签': company_tags,
        '职位详情页链接': job_href,
    }


def get_job_data():
    """Scrape campus-recruiting job cards from zhipin.com and save to Excel.

    Fetches the school-recruit landing page once, parses every job card on
    it, and writes the rows to '数据爬取01.xlsx'. Network or parsing failures
    are reported on stdout rather than raised.
    """
    headers = {
        # Placeholder left by the author — replace with a real browser
        # User-Agent string before running, or the site will likely reject
        # the request.
        'User-Agent': '###自己浏览器的User-Agent###'
    }

    url = 'https://www.zhipin.com/school/?ka=tab_school_recruit_click'

    try:
        # timeout prevents the script from hanging forever on an
        # unresponsive server (the original call had no timeout).
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()  # Raise on 4xx/5xx responses.

        soup = BeautifulSoup(response.text, 'html.parser')
        job_items = soup.find_all('li', class_='job-card-wrapper')

        job_data = []
        for idx, item in enumerate(job_items, 1):
            record = _parse_job_item(item)

            # Progress feedback for long runs.
            print(f"正在爬取第{idx}条：职位名称={record['职位名称']}，公司名称={record['公司名称']}")

            job_data.append(record)

            # Random delay between items to avoid hammering the server.
            time.sleep(random.uniform(0.5, 1.2))

        df = pd.DataFrame(job_data)
        df.to_excel('数据爬取01.xlsx', sheet_name='Sheet1', index=False)
        # Bug fix: the message previously claimed '数据爬取.xlsx' while the
        # file was actually saved as '数据爬取01.xlsx'.
        print('数据已成功保存到 数据爬取01.xlsx')

    except Exception as e:
        # Broad catch is acceptable at this script's top-level boundary;
        # report and exit rather than crash with a traceback.
        print(f'爬取过程中出现错误: {str(e)}')

# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    get_job_data()
