import requests
from lxml import etree
import pandas as pd
import time
import random

# ---------------------------------------------------------------------------
# Zhilian (zhaopin.com) job-listing scraper for the keyword "数据分析".
# Prompts for a city name and a page count, scrapes the listing pages and
# writes the collected jobs to a UTF-8 (BOM) CSV file.
# ---------------------------------------------------------------------------

# Request headers. NOTE(review): the cookie is session-specific and will
# expire — replace it with a fresh one when requests start returning a
# login page or non-200 status codes.
HEADERS = {
    'cookie': 'x-zp-client-id=b7514870-1746-43e8-9332-52a37deb7916; sajssdk_2015_cross_new_user=1; locationInfo_search={%22code%22:%222406%22%2C%22name%22:%22%E9%95%BF%E6%B2%99%E5%8E%BF%22%2C%22message%22:%22%E5%8C%B9%E9%85%8D%E5%88%B0%E5%B8%82%E7%BA%A7%E7%BC%96%E7%A0%81%22}; zp_passport_deepknow_sessionId=3c83850fs0ee4a457e9098de512d58ae334f; at=f3576da8d5c047f7b6038437d1e48c14; rt=335228cd70124e6e939c654ff8c72d4b; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221185211814%22%2C%22first_id%22%3A%2218f9ad520631cd-00df5770b96a674-26001c51-1194912-18f9ad5206417ca%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.google.com.hk%2F%22%7D%2C%22identities%22%3A%22eyIkaWRlbnRpdHlfY29va2llX2lkIjoiMThmOWFkNTIwNjMxY2QtMDBkZjU3NzBiOTZhNjc0LTI2MDAxYzUxLTExOTQ5MTItMThmOWFkNTIwNjQxN2NhIiwiJGlkZW50aXR5X2xvZ2luX2lkIjoiMTE4NTIxMTgxNCJ9%22%2C%22history_login_id%22%3A%7B%22name%22%3A%22%24identity_login_id%22%2C%22value%22%3A%221185211814%22%7D%2C%22%24device_id%22%3A%2218f9ad520631cd-00df5770b96a674-26001c51-1194912-18f9ad5206417ca%22%7D; selectCity_search=530; acw_tc=276082a817162946506754134e098f1307592611a02924fcc4b9ad3adf945b',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0',
}

# Mapping of supported city names to zhaopin.com's internal city codes.
CITY_CODES = {
    '上海': 538, '北京': 530, '广州': 763, '深圳': 765, '天津': 531,
    '武汉': 736, '西安': 854, '成都': 801, '南京': 635, '杭州': 653,
    '重庆': 551, '厦门': 682, '大连': 600
}


def parse_jobs(html_text):
    """Extract job records from one search-results page.

    Parameters
    ----------
    html_text : str
        Raw HTML of a zhaopin.com search-results page.

    Returns
    -------
    list[dict]
        One dict per job, keyed 职位名称/薪资/地点/经验/教育/公司.
        ``zip`` truncates to the shortest extracted column, so a page with
        mismatched markup yields fewer rows rather than misaligned ones.
    """
    html = etree.HTML(html_text)
    job_names = html.xpath('//div[@class="joblist-box__item clearfix"]//div[@class="jobinfo__top"]/a/text()')
    salaries = html.xpath('//div[@class="jobinfo"]//div[@class="jobinfo__top"]/p/text()')
    locations = html.xpath('//div[@class="jobinfo__other-info"]/div[1]/text()')
    experiences = html.xpath('//div[@class="jobinfo__other-info"]/div[2]/text()')
    educations = html.xpath('//div[@class="jobinfo__other-info"]/div[3]/text()')
    companies = html.xpath('//div[@class="companyinfo__top"]/a/text()')
    return [
        {'职位名称': job_name, '薪资': salary, '地点': location,
         '经验': experience, '教育': education, '公司': company}
        for job_name, salary, location, experience, education, company
        in zip(job_names, salaries, locations, experiences, educations, companies)
    ]


def scrape(city_code, pages):
    """Fetch ``pages`` listing pages for ``city_code``; return all job dicts.

    Pages that fail (non-200 status) are reported and skipped — the scrape
    continues with the remaining pages.
    """
    records = []
    for p in range(1, pages + 1):
        url = f'https://sou.zhaopin.com/?jl={city_code}&kw=数据分析&p={p}'
        # timeout= keeps a stalled server from hanging the script forever
        response = requests.get(url, headers=HEADERS, timeout=15)
        if response.status_code == 200:
            records.extend(parse_jobs(response.text))
            print(f'已获取第{p}页数据')
            time.sleep(random.randint(3, 6))  # random delay to avoid rate limiting
        else:
            print(f'请求第{p}页数据失败，状态码：{response.status_code}')
    return records


def main():
    """Prompt for city and page count, scrape, and write the CSV."""
    city = input('请输入要爬取的城市：')
    # Fail fast with a clear message instead of a raw KeyError on lookup.
    if city not in CITY_CODES:
        raise SystemExit(f'不支持的城市：{city}，可选城市：{"、".join(CITY_CODES)}')
    try:
        pages = int(input('请输入要爬取的页码数：'))
    except ValueError:
        # Fail fast with a clear message instead of a raw ValueError traceback.
        raise SystemExit('页码数必须是整数')

    df = pd.DataFrame(scrape(CITY_CODES[city], pages))
    # utf_8_sig writes a BOM so Excel opens the Chinese text correctly
    df.to_csv('智联招聘数据.csv', index=False, encoding='utf_8_sig')
    print('数据爬取完成并已保存到CSV文件。')


if __name__ == '__main__':
    main()