import json
import time

import pymysql
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver import ChromeOptions
if __name__ == '__main__':  # exploration run: find how many result pages each keyword has
    job = ["物流与供应链", "前端开发"]
    option = ChromeOptions()
    UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31"
    option.add_argument(f'user-agent={UA}')
    # Reduce the chance of the site detecting Selenium automation.
    option.add_experimental_option('useAutomationExtension', False)
    option.add_experimental_option('excludeSwitches', ['enable-automation'])
    # 'options=' replaces 'chrome_options=', which is deprecated (removed in Selenium 4).
    web = webdriver.Chrome(options=option)
    # Hide navigator.webdriver before any page script runs.
    web.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
        "source": """
        Object.defineProperty(navigator, 'webdriver', {
          get: () => undefined
        })
      """
    })
    web.implicitly_wait(3)
    url = 'https://www.51job.com/'
    web.get(url)
    time.sleep(6)  # give the site time to finish its anti-bot checks
    page_list = []
    for j in job:
        # Only page 1 is needed: it already carries the total page count.
        # (The original looped over range(1, 1 + 1), i.e. exactly one page.)
        url = "https://search.51job.com/list/000000,000000,0000,00,9,99,{},2,{}.html?".format(j, 1)
        web.get(url)
        html = web.page_source
        soup = BeautifulSoup(html, "lxml")
        # The search-result payload is assigned in the 4th inline <script>.
        text = soup.find_all("script", type="text/javascript")[3].string
        # json.loads() instead of eval(): page content is untrusted input.
        page_te = json.loads(str(text).split("=", 1)[1])["total_page"]
        page_list.append(page_te)
        print(page_te)
def spider(url):
    """Fetch *url* with a desktop-browser User-Agent and return the HTML text.

    Returns:
        The decoded response body on success, or None when the request fails
        (connection error, timeout, or non-2xx status).
    """
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31"}
    try:
        # timeout so a dead server cannot hang the crawl forever.
        rep = requests.get(url, headers=headers, timeout=10)
        rep.raise_for_status()
        # Use the detected encoding so Chinese text decodes correctly.
        rep.encoding = rep.apparent_encoding
        return rep.text
    except requests.RequestException:  # narrow catch instead of bare except
        print("解析失败")
        return None
def jiexi(html, info, name):
    """Parse one 51job search-result page and append job rows to *info*.

    Args:
        html: raw page source of a search-result page.
        info: output list; one 6-item row is appended per job posting:
              [name, job title, company, salary, location, benefits].
        name: the search keyword currently being crawled (stored per row).
    """
    soup = BeautifulSoup(html, "lxml")
    # The listing payload is assigned in the 4th inline <script> on the page.
    text = soup.find_all("script", type="text/javascript")[3].string
    # json.loads() instead of eval(): the page content is untrusted input.
    data = json.loads(str(text).split("=", 1)[1])["engine_jds"]

    def _field(record, key):
        # Missing or non-string values fall back to a single space,
        # matching the original bare-except behavior.
        value = record.get(key)
        return value.replace("\\", "") if isinstance(value, str) else " "

    for d in data:
        info.append([
            name,
            _field(d, "job_name"),           # job title
            _field(d, "company_name"),       # company
            _field(d, "providesalary_text"), # salary
            _field(d, "workarea_text"),      # location
            _field(d, "jobwelf"),            # benefits
        ])
def save(info, cur=None, conn=None):
    """Insert crawled job rows into the MySQL ``jobs`` table.

    Args:
        info: list of 6-item rows in the order produced by ``jiexi``:
              [present keyword, job title, company, salary, area, benefits].
        cur: DB cursor; defaults to the module-level ``cursor``.
        conn: DB connection; defaults to the module-level ``db``.
    """
    if cur is None:
        cur = cursor
    if conn is None:
        conn = db
    # Parameterized statement — values are never interpolated into the SQL text.
    sql = "insert into jobs(present,job_name,company,salary,area,job_Requirements) values(%s,%s,%s,%s,%s,%s)"
    for data in info:
        cur.execute(sql, data[:6])
    # One commit per batch instead of one per row: fewer DB round trips,
    # and a page of results is stored atomically.
    conn.commit()
# 1.4 Crawl the listing data and store it in MySQL
if __name__ == '__main__':  # crawl run: fetch every result page and store rows in MySQL
    job = ["物流与供应链", "前端开发"]
    # Total result pages per keyword, taken from the exploration run above.
    page_list = ['4', '527']
    option = ChromeOptions()
    UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31"
    option.add_argument(f'user-agent={UA}')
    option.add_experimental_option('useAutomationExtension', False)
    option.add_experimental_option('excludeSwitches', ['enable-automation'])
    # 'options=' replaces 'chrome_options=', which is deprecated (removed in Selenium 4).
    web = webdriver.Chrome(options=option)
    # Hide navigator.webdriver before any page script runs.
    web.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
        "source": """
        Object.defineProperty(navigator, 'webdriver', {
          get: () => undefined
        })
      """
    })
    web.implicitly_wait(10)
    url = 'https://www.51job.com/'
    web.get(url)
    time.sleep(6)  # give the site time to finish its anti-bot checks
    # NOTE(review): hard-coded DB credentials — move to env vars or a config file.
    db = pymysql.connect(
        host="localhost",
        user="root", password="20010422f",
        database="python", charset="utf8"
    )
    cursor = db.cursor()
    for j in range(len(job)):
        # '+ 1' so the last page is crawled too (the original range stopped one short).
        for i in range(1, int(page_list[j]) + 1):
            info = []
            url = "https://search.51job.com/list/000000,000000,0000,00,9,99,{},2,{}.html?".format(job[j], i)
            web.get(url)
            ht = web.page_source
            # jiexi parses the page itself; the extra BeautifulSoup re-parse
            # of the same source that used to sit here was unused.
            jiexi(ht, info, job[j])
            print('岗位{}:{}/{}'.format(j, i, page_list[j]))
            time.sleep(2)  # throttle between pages
            save(info)
        time.sleep(3)  # longer pause between keywords
    cursor.close()
    db.close()