# BeautifulSoup或Xpath使用资源包提供的方法爬取相关数据

from bs4 import BeautifulSoup
import requests


def get_soup(url, header, bianma):
    """Fetch *url* and return the job-card ``<div>`` elements from the page.

    Parameters
    ----------
    url : str
        Page to fetch.
    header : dict
        HTTP headers (User-Agent) passed to ``requests.get``.
    bianma : str
        Character encoding used to decode the response body.

    Returns
    -------
    list
        bs4 Tag objects, one per recruit card; empty list when the
        request fails, so callers can still iterate safely.
    """
    try:
        # timeout so a dead network cannot hang the script forever
        req = requests.get(url, headers=header, timeout=10)
        req.raise_for_status()  # treat HTTP 4xx/5xx as a failure too
    except requests.RequestException:
        # Original bare except fell through and hit an unbound `soup`
        # (NameError); report and return an empty result instead.
        print("网络地址错误或网络信号不佳")
        return []
    req.encoding = bianma
    soup = BeautifulSoup(req.text, "html.parser")
    return soup.findAll("div", {"class": "land-recruit-card_3pAuc"})


def get_data(div_list):
    """Extract job fields from the recruit-card divs and print each one.

    Parameters
    ----------
    div_list : iterable
        bs4 Tag objects for the card ``<div>`` elements (see ``get_soup``).

    Returns
    -------
    list of lists
        A Chinese-language header row followed by one 10-field row per job:
        title, salary, experience, education, contact, contact title,
        company name, company type, headcount, listing status.
    """
    jobs = []
    h = ["岗位", "薪资", "工作经验", "学历", "公司联系人", "联系人职位", "公司名称", "公司类型", "员工人数", "上市情况"]

    jobs.append(h)

    for div in div_list:
        # Job title
        occupation = div.find("a", {"class": "job-title"}).text
        print("岗位: " + occupation)
        # Salary
        salary = div.find("div", {"class": "salary-info"}).text
        print("薪资: " + salary)
        # The two grey tags hold experience then education, in page order.
        grey_tags = div.findAll("div", {"class": "boss-tag grey"})
        job_experience = grey_tags[0].text
        print("工作经验: " + job_experience)
        xueli = grey_tags[1].text
        print("学历: " + xueli)
        # Contact span is "name｜title" separated by a fullwidth bar.
        contact = div.find("div", {"class": "boss-tag border none"}).span.text.split("｜")
        print("公司联系人: " + contact[0])
        print("联系人职位: " + contact[1])
        # Company name
        company_title = div.find("div", {"class": "info-text"}).h3.text
        print("公司名称: " + company_title)
        # BUG FIX: the original passed a *set* ({"boss-tag border"}) as the
        # attrs argument, which does not filter by class; the intended
        # filter is a dict keyed on "class".
        company_type = div.findAll("div", {"class": "boss-tag border"})
        print("公司类型: " + company_type[0].text)
        print("员工人数: " + company_type[1].text)
        print("上市情况: " + company_type[2].text)

        print("-----------------------------------")
        job = [occupation, salary, job_experience, xueli, contact[0], contact[1], company_title, company_type[0].text, company_type[1].text, company_type[2].text]

        jobs.append(job)

    return jobs


# Scrape target and request configuration.
url = 'https://www.kanzhun.com/rank_p/?ka=index-icon-click'
ua = {
    'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36'
}
bianma = "utf-8"

# Guard the entry point so importing this module does not trigger
# network I/O — the original ran the scrape unconditionally on import.
if __name__ == "__main__":
    divs = get_soup(url, ua, bianma)
    jobs = get_data(divs)
    print(jobs)








