# 爬取招聘网站某城市所有岗位的招聘数据；
# 招聘网站任选其一：拉勾网，Boss直聘，智联招聘，51Job，前程无忧；
# 分析页面，结合正则化，爬取招聘数据，保存为csv文件，字段不限，数据量不少于1w条

# 步骤如下:
# 目标51Job的招聘数据--链接地址:https://we.51job.com/pc/search?industry=01&keyword=&searchType=6&sortType=3&metro=
# 1.加载URL
# 2.查看api地址的调用规律
# 3.初始化好selenium
# 4.通过selenium点击和搜索从网页源代码中分析出帖子的信息
# 5.导入L.csv文件

import time
import csv
from selenium.webdriver import Edge

# Initialize selenium: desired-capabilities dict for the Edge browser.
# NOTE(review): passing a raw dict via `capabilities=` is the legacy
# Selenium 3 style; Selenium 4 removed this kwarg in favor of Options
# objects — confirm the pinned selenium version before upgrading.
options = {
    "browserName": "MicrosoftEdge",
    "version": "",
    "platform": "WINDOWS",
    "ms:edgeOptions": {
        # Hide the "navigator.webdriver" automation flag so the site is
        # less likely to block the scraper.
        "extensions": [], "args": ["--disable-blink-features=AutomationControlled"]  # extra launch argument
    }
}

# Launch Edge and load the 51Job search results page (industry=01, sorted).
web = Edge(capabilities=options)
web.get("https://we.51job.com/pc/search?industry=01&keyword=&searchType=6&sortType=3&metro=")
# Give the page (and any anti-bot checks) time to finish loading.
time.sleep(6)


def get_recruit(pages=49):
    """Scrape job postings page by page, writing rows via the global csvwriter.

    For each of up to *pages* result pages: collect every ``joblist-item``
    card, extract the company name and job title, write them as one CSV
    row, scroll to the bottom (to trigger lazy loading), then click the
    "next page" button.

    Args:
        pages: maximum number of result pages to visit. Defaults to 49,
            matching the original hard-coded limit.

    Relies on module-level globals: ``web`` (the Edge driver) and
    ``csvwriter`` (an open ``csv.writer``).
    """
    for _page in range(pages):
        cards = web.find_elements_by_xpath('//*[@class="joblist-item"]')
        for card in cards:
            names = card.find_elements_by_xpath('.//*[@class="cname text-cut"]')
            titles = card.find_elements_by_xpath('.//*[@class="jname text-cut"]')
            # Skip malformed cards instead of raising IndexError and
            # aborting the whole crawl.
            if not names or not titles:
                continue
            csvwriter.writerow([names[0].text, titles[0].text])
        # Scroll to the bottom so any lazily loaded content is rendered.
        web.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(2)
        # Advance to the next results page.
        ele = web.find_element_by_xpath(
            '//*[@id="app"]/div/div[2]/div/div/div[2]/div/div[2]/div/div[3]/div/div/div/button[2]')
        ele.click()
        time.sleep(2)


if __name__ == '__main__':
    # Time the whole crawl.
    t1 = time.time()
    # Initialize the output file. newline="" is required by the csv
    # module: without it, csv.writer emits a blank line between every
    # row on Windows. The `with` block guarantees the file is closed
    # even if the scrape raises.
    with open("bak/L.csv", mode="w", encoding="utf-8", newline="") as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(["公司名称", "招聘信息"])
        # Run the scrape (writes rows through the global csvwriter).
        get_recruit()

    t2 = time.time()
    print("over!")
    print(f"花费时间为:{t2 - t1:.2f}秒")
