import requests
from bs4 import BeautifulSoup # 导入包
import json
import re
import pymysql

# Open the MySQL connection used to persist scraped job postings.
# NOTE(review): credentials are hard-coded — consider moving to config/env.
conn = pymysql.connect(
    host='localhost',
    port=3306,
    user='root',
    passwd='199408',
    db='51haha',
    charset='gbk',
)
cursor = conn.cursor()
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4098.3 Safari/537.36'}# spoofed request headers so the site treats us as a normal browser
keyword = input('请输入想要查询的职业1.python;2.java。：') # prompt the user for the job keyword to search (e.g. python / java)
list_href = []  # collects the detail-page (second-level) URLs found on the listing pages
list_stair = []  # collects the fields extracted from the listing (first-level) pages
# Crawl listing pages 1-5 for the chosen keyword. Each page embeds its search
# results as JSON inside a <script> tag (window.__SEARCH_RESULT__), so we pull
# it out with a regex and parse it with the json module.
for page in range(1, 6):
    url ='https://search.51job.com/list/170200,000000,0000,00,9,99,{},2,{}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='.format(keyword, page)
    res = requests.get(url, headers=headers)
    res.encoding = 'gbk'  # 51job serves gbk; set explicitly (matches the detail-page loop) to avoid mojibake
    ret = re.findall(r'window.__SEARCH_RESULT__ = (.*?)</script>', res.text)
    data = json.loads(ret[0])
    # engine_search_result is a list of dicts, one dict per job posting.
    for job in data['engine_search_result']:
        attrs = job['attribute_text']  # e.g. [area, experience, education, headcount]
        try:
            recruit = attrs[3]
        except IndexError:
            recruit = 0  # headcount missing for some postings; keep original sentinel
        list_href.append(job['job_href'])
        list_stair.append([
            job['job_name'],
            job['providesalary_text'],
            job['workarea_text'],
            attrs[1],          # required years of experience
            attrs[2],          # required education level
            recruit,
            job['companytype_text'],
            job['company_name'],
        ])



# NOTE(review): `list` shadows the builtin; kept for backward compatibility with
# the code below that reads it, but a rename (e.g. detail_rows) is advisable.
list = []  # collects [position info, contact info, company info] per detail page
for x in list_href:  # visit every second-level (detail) URL collected above
    res = requests.get(x, headers=headers)
    res.encoding = 'gbk'  # site serves gbk-encoded pages
    soup = BeautifulSoup(res.text, 'html.parser')
    # The detail page puts its three info sections in three
    # ('div', class_='tBorderTop_box') tags: position, contact, company.
    company = soup.find_all('div', class_='tBorderTop_box')
    sections = []
    # Fall back to the original placeholder strings '1'/'2'/'3' when a
    # section is missing, then strip newlines/carriage returns in one pass.
    for idx, fallback in enumerate(('1', '2', '3')):
        try:
            text = company[idx].text
        except IndexError:
            text = fallback
        sections.append(text.replace('\n', '').replace('\r', ''))
    possion_message, connect_way, company_message = sections
    list.append([possion_message, connect_way, company_message])

# Persist the scraped data. The original statement embedded the literal text
# "list_stair" / "list" inside the SQL string and was never executed, so
# nothing was ever written. Use a parameterized executemany instead: one row
# per job, pairing the listing-page record (yiji) with its detail-page record
# (erji) — both lists were built in the same list_href order.
# NOTE(review): assumes message(yiji, erji) are text columns — confirm schema.
sql = "INSERT INTO message(yiji,erji) VALUES (%s,%s)"
rows = [(str(stair), str(detail)) for stair, detail in zip(list_stair, list)]
cursor.executemany(sql, rows)

conn.commit()
cursor.close()
conn.close()












