'''
coding: utf-8
@Software: PyCharm
@Author: JiangDa
@File: XiaoZhaoSpider.py
@Time: 2023/8/21 021 20:22
@Description: 智联校园招聘爬虫，采用json解析的方式
'''

import random
import time
import pandas as pd
import requests


def get_data():
    """Scrape Zhilian campus-recruitment (xiaoyuan) "Java" job listings.

    Requests up to 30 pages of search results from the site's JSON API,
    extracts a fixed set of fields for every position, and rewrites the
    accumulated rows to ./java.csv after each page so partial progress
    survives an interruption.

    Returns:
        None. Output is the CSV file and progress printed to stdout.
    """
    # Every row scraped so far, across all pages.
    all_position_list = []

    # Base API endpoint.  NOTE(bugfix): `pageIndex` and the search keyword
    # are deliberately NOT baked into this URL -- they are supplied per
    # request via `params=` below.  The original code hard-coded
    # pageIndex=1 here and passed the page number as a GET *body*
    # (`data=`), which the server ignores, so every iteration re-fetched
    # page 1.  The at/rt/x-zp-* tokens are session credentials captured
    # from the browser's DevTools and will expire.
    url = (
        'https://xiaoyuan.zhaopin.com/api/sou'
        '?S_SOU_POSITION_SOURCE_TYPE=&S_SOU_POSITION_TYPE=2'
        '&S_SOU_WORK_CITY=&S_SOU_JD_INDUSTRY_LEVEL=&S_SOU_COMPANY_TYPE='
        '&S_SOU_REFRESH_DATE=&order=12&pageSize=30&_v=0.60724090'
        '&at=38593014befe485981124363ba8ae066'
        '&rt=72fb5a62e6984b159400c992277a190a'
        '&x-zp-page-request-id=0283462e8f6b45bc8aa47b27dd689004-1692802137029-456004'
        '&x-zp-client-id=0511586c-3ad9-4d4b-b17e-42cbf8bb8636'
    )

    # Browser-captured User-Agent and Cookie; hoisted out of the loop
    # since they never change between pages.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
        "Cookie": 'x-zp-client-id=0511586c-3ad9-4d4b-b17e-42cbf8bb8636; sensorsdata2015jssdkchannel=%7B%22prop%22%3A%7B%22_sa_channel_landing_url%22%3A%22https%3A%2F%2Flanding.zhaopin.com%2Fregister%3Fidentity%3Dc%26channel_name%3Dbaidu_sem_track%26callback_id%3DIQJ0tYqm%26_data_version%3D0.5.0%26channel_utm_content%3Dty%26project%3Dzlclient%26channel_utm_medium%3Docpc%26tid%3Ds%26channel_link_type%3Dweb%26channel_utm_source%3DbaiduPC%26hash_key%3DuaKIYNjV6PvgIWUUAJ9n%26sat_cf%3D2%26channel_utm_campaign%3DPC_%25E7%2599%25BE%25E5%25BA%25A6%25E5%25A4%25A9%25E6%25B4%25A5%26channel_utm_term%3D120334%26_channel_track_key%3DF9g76MwM%26link_version%3D1%26channel_keyword_id%3D374538643019%26channel_ad_id%3D56983594046%26channel_account_id%3D2757477%26channel_keyword%3D%25E6%25B1%2582%25E8%2581%258C%2B%25E6%2599%25BA%25E8%2581%2594%26channel_adgroup_id%3D6617496549%26channel_campaign_id%3D183100027%26sdclkid%3DALei1526ArDDbJDl%26bd_vid%3D9199790395302290570%22%7D%7D; at=38593014befe485981124363ba8ae066; rt=72fb5a62e6984b159400c992277a190a; acw_tc=2760829c16928020743025990e605376048ce01768206524ad119d5bd94820; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221138121497%22%2C%22first_id%22%3A%22188a5b6fe2f93e-0a6b74f0329162-26031a51-1327104-188a5b6fe30699%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22identities%22%3A%22eyIkaWRlbnRpdHlfY29va2llX2lkIjoiMTg4YTViNmZlMmY5M2UtMGE2Yjc0ZjAzMjkxNjItMjYwMzFhNTEtMTMyNzEwNC0xODhhNWI2ZmUzMDY5OSIsIiRpZGVudGl0eV9sb2dpbl9pZCI6IjExMzgxMjE0OTcifQ%3D%3D%22%2C%22history_login_id%22%3A%7B%22name%22%3A%22%24identity_login_id%22%2C%22value%22%3A%221138121497%22%7D%2C%22%24device_id%22%3A%22188a5b6fe2f93e-0a6b74f0329162-26031a51-1327104-188a5b6fe30699%22%7D'
    }

    # JSON field name -> CSV column header, in output order.
    columns = {
        'name': '岗位名称',
        'companyName': '公司名称',
        'salary60': '薪资',
        'workCity': '城市',
        'companySize': '公司规模',
        'property': '企业类型',
        'education': '学历',
        'jobSummary': '岗位职责',
        'publishTime': '发布时间',
        'companyUrl': '详情页面',
    }

    # One persistent session: keeps cookies/connection across pages.
    # (The original opened a fresh session AND issued a second throwaway
    # request on every page -- double the traffic for no benefit.)
    session = requests.Session()

    # Pages 1..30 inclusive (range upper bound is exclusive).
    for page_num in range(1, 31):
        # Search keyword and page number go in the query string (params),
        # not the request body -- that is the pagination bugfix.
        params = {
            'S_SOU_FULL_INDEX': 'Java',
            'pageIndex': page_num,
        }
        res = session.get(url=url, headers=headers, params=params)
        position_result = res.json()['data']['data']['list']

        # An empty page yields no rows; skip it (matches original behavior).
        if not position_result:
            continue

        # Extract the selected fields for every position on this page.
        position_list = [
            [position[key] for key in columns] for position in position_result
        ]

        print('已爬取第{}页\n'.format(page_num))
        print(position_list)
        all_position_list += position_list

        # Rewrite the full CSV each page so partial results are persisted.
        df = pd.DataFrame(data=all_position_list, columns=list(columns.values()))
        df.to_csv('./java.csv', mode='w', encoding='utf-8', index=False)

        # Random 3-13s pause between pages to reduce the chance of an IP ban.
        wait_seconds = random.random() * 10 + 3
        print("等待{}s...".format(round(wait_seconds)))
        time.sleep(wait_seconds)

    # Moved outside the loop: the original printed this after EVERY page.
    print("爬取完毕！！！")

# Script entry point: run the scraper only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    get_data()
