from bs4 import BeautifulSoup  # 网页解析，获取数据
import urllib.request,urllib.error  # 制定URL，获取网页
from urllib import parse
import json
import time
import pymysql
import requests
from lxml import etree

# Request headers sent with every HTTP request; a desktop-browser
# User-Agent reduces the chance of being rejected as a bot.
head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36"
    }
# kw = input("Enter the job keyword to search for: ")
kw = "python"  # job-search keyword embedded into the list-page URL
# 51job expects the keyword URL-encoded twice, hence the nested quote() calls
keyword = parse.quote(parse.quote(kw))
pageNum = 1  # NOTE(review): appears unused in this file — confirm before removing

jobList = []  # all scraped job records; each element is a dict seeded by getLink() and filled by getData()

# 主流程
def main():
    """Scrape 51job list pages 1-9 for the configured keyword and persist results.

    For every list page: collect the detail-page links, scrape each detail
    page into jobList, and stop paging as soon as a page yields no links.
    Finally write everything to the database via saveDate().
    """
    url_template = ("https://search.51job.com/list/000000,000000,0000,00,9,99,"
                    + keyword + ",2,%d.html")
    for page in range(1, 10):
        detail_links = getLink(url_template % page)  # all job links on this list page
        print("正在爬取第%d页数据" % page)
        if not detail_links:
            break  # empty page means we ran past the last page of results
        for link in detail_links:
            getData(link)  # scrape one detail page into jobList
    saveDate()  # flush everything collected above into MySQL

def saveDate():
    """Insert every fully-populated record from jobList into the `job` table.

    Fixes over the original:
    - Records missing keys (detail pages getData() failed to parse) are
      skipped with `continue`.  The original bare `except: pass` either
      raised NameError on the first bad record or silently re-inserted the
      PREVIOUS job's still-bound values.
    - One connection/cursor for the whole batch instead of a fresh
      connection per row; closed in `finally` so errors don't leak it.
    """
    db = pymysql.connect(host='localhost',
                         user="root",
                         password="root",
                         db='51job',
                         charset='utf8')
    sql = """insert into job (name,company,salary,region,nature,scale,type,welfare,recruiters,required,number,time) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);
    """
    try:
        cursor = db.cursor()
        try:
            for job in jobList:
                try:
                    values = (job["title"],       # job title
                              job["company"],     # company name
                              job["salary"],      # salary range
                              job["region"],      # city/region
                              job["nature"],      # company ownership type
                              job["scale"],       # company size
                              job["type"],        # industry
                              job["welfare"],     # benefits tags
                              job["recruiters"],  # target applicants
                              job["required"],    # required education
                              job["number"],      # headcount
                              job["time"])        # posting date
                except KeyError:
                    # Detail page was never parsed successfully — skip it.
                    continue
                cursor.execute(sql, values)
            db.commit()  # one commit for the whole batch
        finally:
            cursor.close()
    finally:
        db.close()


def getData(jobpage):
    """Fetch one 51job detail page and fill in the matching jobList record.

    jobpage -- a detail-page URL previously stored by getLink() under the
    "link" key of a jobList entry.  Pages whose layout does not match the
    expected xpaths (ads, expired postings) leave the record with only its
    "link" key; saveDate() skips such records.
    """
    resp = requests.get(jobpage, headers=head)
    # 51job serves GBK-encoded pages; drop undecodable bytes.
    text = resp.content.decode('gbk', 'ignore')
    html = etree.HTML(text)

    for job in jobList:
        if jobpage != job["link"]:
            continue
        try:
            name = html.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/h1/text()')[0]  # job title
            company = html.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[1]/a/p/text()')[0]  # company name
            salary = html.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/strong/text()')[0]  # salary
            # info fields, in page order: region | target applicants |
            # education | headcount | posting date — TODO confirm order
            # against a live page.
            info = html.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/p[2]/text()')
            nature = html.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[2]/p[1]/text()')[0]  # company ownership
            scale = html.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[2]/p[2]/text()')[0]  # company size
            company_type = html.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[2]/p[3]/a/text()')[0]  # industry (renamed: original shadowed builtin `type`)
            welfare = ' '.join(html.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/div/div/span/text()'))  # benefits tags

            job["title"] = name
            job["company"] = company
            job["salary"] = salary
            # info[0] looks like "city-district ..."; keep only the city part
            # and strip all embedded whitespace.
            region, _sep, _tail = info[0].partition('-')
            job["region"] = "".join(region.split())
            job["nature"] = nature
            job["scale"] = scale
            job["type"] = company_type
            job["welfare"] = welfare
            job["recruiters"] = info[1].strip()  # target applicants
            job["required"] = info[2].strip()    # required education
            job["number"] = info[3].strip()      # headcount
            job["time"] = info[4].strip()        # posting date (original comment wrongly said headcount)
        except IndexError:
            # Page layout differs from the expected xpaths; leave the
            # record partial so saveDate() skips it.
            pass
        break  # each link occurs once in jobList — stop scanning (was a full O(n) pass per call)

    # Throttle every request (not only successful parses, as before) to
    # avoid being flagged as an attacker and IP-banned.
    time.sleep(0.3)


def getLink(url):
    """Scrape one list page and return the detail-page URLs it contains.

    Side effect: appends a seed record {'link': href} to the global jobList
    for every job found; getData() fills these in later.  Returns an empty
    list when the page cannot be fetched or parsed, which makes main()
    stop paging — the original crashed with IndexError when askURL()
    returned "" after a network error.
    """
    jobLink = []
    html = askURL(url)
    if not html:
        # askURL swallowed a network error; treat as "no more pages".
        return jobLink

    bs = BeautifulSoup(html, 'lxml')

    # The search results are embedded as JSON in an inline script:
    #   window.__SEARCH_RESULT__ = {...}
    # NOTE(review): the [2] index is template-dependent — verify if 51job
    # changes its page layout.
    try:
        script = bs.findAll("script", type="text/javascript")[2]
        payload = script.get_text().replace("window.__SEARCH_RESULT__ =", "")
        results = json.loads(payload)['engine_search_result']
    except (IndexError, ValueError, KeyError):
        # Missing script, malformed JSON, or changed schema: give up on
        # this page instead of crashing the whole crawl.
        return jobLink

    for item in results:
        jobLink.append(item['job_href'])
        jobList.append({'link': item['job_href']})
        # (removed the per-item time.sleep(0.3): this loop performs no
        # network I/O, so sleeping here only slowed the crawl down)

    return jobLink


def askURL(url):
    """Fetch *url* and return its body decoded as GBK, or "" on any URL error.

    The response is closed via the context manager (the original never
    closed it, leaking the connection).  Errors are deliberately swallowed:
    getLink() treats the empty-string result as "stop paging".
    """
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("gbk", 'ignore')
    except urllib.error.URLError:
        # Best-effort fetch: the caller handles an empty result.
        pass
    return html




# Run the full scrape-and-store pipeline only when executed as a script,
# not when imported as a module.
if __name__ == "__main__":
    main()