# --- standard library ---
import random
import re
import time

# --- third-party ---
import jieba
import pandas as pd  # FIX: original `from turtle import pd` raises ImportError; the word-cloud section below uses pandas' pd
import pymysql
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver import ChromeOptions
# 1.2对每个岗位搜索的到的总页数进行爬取
if    __name__     == '__main__': # Step 1.2: for every job keyword, read the total result-page count
    # The 30 job titles to crawl.
    job=["产品经理","产品助理","交互设计","前端开发","软件设计","IOS开发","业务分析","安卓开发","PHP开发","业务咨询","需求分析","流程设计"
    ,"售后经理","售前经理","技术支持","ERP实施","实施工程师","IT项目经理","IT项目助理","信息咨询","数据挖掘","数据运营","数据分析","网络营销",
    "物流与供应链","渠道管理","电商运营","客户关系管理","新媒体运营","产品运营"]
    option = ChromeOptions()
    UA="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31"
    option.add_argument(f'user-agent={UA}')
    # Reduce the chance of the site detecting Selenium automation.
    option.add_experimental_option('useAutomationExtension', False)
    option.add_experimental_option('excludeSwitches', ['enable-automation'])
    # FIX: `chrome_options=` is deprecated (removed in Selenium 4); `options=` works in 3.x and 4.x.
    web = webdriver.Chrome(options=option)
    # Hide navigator.webdriver before any page script runs.
    web.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
        "source": """
        Object.defineProperty(navigator, 'webdriver', {
          get: () => undefined
        })
      """
    })
    web.implicitly_wait(3)
    # Warm-up request so the session carries normal cookies before crawling.
    url = 'https://search.51job.com/list/000000,000000,0000,00,9,99,%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86,2,2.html?'
    web.get(url)
    time.sleep(6)
    page_list = []
    for j in job:
        # Only page 1 is needed: it embeds `total_page` for the whole search
        # (the original looped over range(1, 2), i.e. a single iteration).
        url = "https://search.51job.com/list/000000,000000,0000,00,9,99,{},2,{}.html?".format(j, 1)
        web.get(url)
        html = web.page_source
        soup = BeautifulSoup(html, "lxml")
        # The 4th <script type="text/javascript"> holds a JS assignment whose
        # right-hand side contains the search result data.
        text = soup.find_all("script", type="text/javascript")[3].string
        # NOTE(security): eval() on scraped content can execute arbitrary
        # expressions; json.loads would be safer if the payload is strict JSON - TODO confirm.
        page_te = eval(str(text).split("=", 1)[1])["total_page"]
        page_list.append(page_te)
        print(page_te)
# 1.3进行爬取数据相关函数的设计
#定义 spider()函数，用于获取每个 url 的 html
def spider(url):
    """GET *url* and return its HTML text, or None if the request fails.

    Uses the guessed (apparent) encoding so Chinese pages decode correctly.
    """
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31"}
    try:
        # FIX: added a timeout so a stalled server cannot hang the crawl forever.
        rep = requests.get(url, headers=headers, timeout=10)
        rep.raise_for_status()
        rep.encoding = rep.apparent_encoding
        return rep.text
    except requests.RequestException:
        # FIX: bare `except:` narrowed to the requests exception hierarchy
        # (raise_for_status raises HTTPError, a RequestException subclass).
        print("解析失败")
        return None
#定义 jiexi()函数，用于解析得到的 html
def jiexi(html, info, name):
    """Parse one 51job search-result page and append a 10-element row per posting to *info*.

    html: raw page source; info: accumulator list (mutated in place);
    name: the search keyword, stored as each row's first column.
    Row layout: [name, job_name, updatedate, company_name, companyind_text,
                 companysize_text, workarea_text, providesalary_text,
                 attribute_text, jobwelf]
    """
    soup = BeautifulSoup(html, "lxml")
    # The data lives in the 4th <script type="text/javascript">; the postings
    # are under the "engine_jds" key of the assigned object.
    text = soup.find_all("script", type="text/javascript")[3].string
    # NOTE(security): eval() on scraped content can execute arbitrary
    # expressions; json.loads would be safer if the payload is strict JSON - TODO confirm.
    data = eval(str(text).split("=", 1)[1])["engine_jds"]

    def _field(d, key):
        # One place for the nine copy-pasted try/except blocks of the original:
        # strip backslashes, fall back to " " when missing or not a string.
        try:
            return d[key].replace("\\", "")
        except Exception:
            return " "

    for d in data:
        try:
            # Requirement list (city / experience / degree / headcount) joined by commas.
            # FIX: the original left attribute_text unset (or stale from the
            # previous posting) when the list was empty.
            at = d["attribute_text"]
            attribute_text = ",".join(at) if at else " "
        except Exception:
            attribute_text = " "
        info.append([
            name,
            _field(d, "job_name"),           # job title
            _field(d, "updatedate"),         # last update date
            _field(d, "company_name"),       # company name
            _field(d, "companyind_text"),    # company industry / type
            _field(d, "companysize_text"),   # company size
            _field(d, "workarea_text"),      # work location
            _field(d, "providesalary_text"), # salary text
            attribute_text,                  # job requirements
            _field(d, "jobwelf"),            # benefits
        ])
#将数据存到 MySQL 中名为“51job”的数据库中
def save(info):
    """Insert crawled rows from *info* into the MySQL `jobs` table.

    Relies on the module-level `cursor` and `db` created in the crawling
    __main__ block. Commits after every row so an interrupted crawl keeps
    the rows written so far.
    """
    # FIX: the SQL text never changes - build it once instead of per iteration.
    sql = "insert into jobs(当前爬取岗位,岗位,更新时间,公司名称,公司类型,公司规模,工作地点,薪资,工作要求,工作待遇) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    for data in info:
        # `data` is already in column order (see jiexi): present_job, job_name,
        # updatedate, company_name, companyind_text, companysize_text,
        # workarea_text, providesalary_text, attribute_text, jobwelf -
        # no need to unpack into ten locals and repack.
        cursor.execute(sql, list(data))
        db.commit()
# 1.4进行数据的爬取
if    __name__     == '__main__': # Step 1.4: crawl every result page per job keyword and store rows in MySQL
    job=["产品经理","产品助理","交互设计","前端开发","软件设计","IOS开发","业务分析","安卓开发","PHP开发","业务咨询","需求分析","流程设计"
    ,"售后经理","售前经理","技术支持","ERP实施","实施工程师","IT项目经理","IT项目助理","信息咨询","数据挖掘","数据运营","数据分析","网络营销",
    "物流与供应链","渠道管理","电商运营","客户关系管理","新媒体运营","产品运营"]
    # Total page count per job, obtained in step 1.2 above.
    page_list=['1141', '62', '169', '619', '356', '61', '229', '64', '56', '356', '1379', '147', '62', '29', '2000', '173', '184', '10', '2', '396', '221', '115', '2000', '381', '5', '295', '1233', '280', '699', '352']
    option = ChromeOptions()
    UA="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31"
    option.add_argument(f'user-agent={UA}')
    # Reduce the chance of the site detecting Selenium automation.
    option.add_experimental_option('useAutomationExtension', False)
    option.add_experimental_option('excludeSwitches', ['enable-automation'])
    # FIX: `chrome_options=` is deprecated (removed in Selenium 4); `options=` works in 3.x and 4.x.
    web = webdriver.Chrome(options=option)
    # Hide navigator.webdriver before any page script runs.
    web.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
        "source": """
        Object.defineProperty(navigator, 'webdriver', {
          get: () => undefined
        })
      """
    })
    web.implicitly_wait(10)
    # Warm-up request so the session carries normal cookies before crawling.
    url='https://search.51job.com/list/000000,000000,0000,00,9,99,%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86,2,2.html?'
    web.get(url)
    time.sleep(6)
    le=len(job)
    # NOTE(security): credentials are hard-coded - move to config/env before reuse.
    db = pymysql.connect(
        host="localhost",
        user="root",
        password="20010422f",
        database="student",
    )
    cursor = db.cursor()
    # Starts at index 4 - presumably resuming a previously interrupted crawl; TODO confirm.
    for j in range(4,le):
        # FIX: range(1, N) skipped the last page; pages run 1..total inclusive.
        for i in range(1, int(page_list[j]) + 1):
            info = []
            url = "https://search.51job.com/list/000000,000000,0000,00,9,99,{},2,{}.html?".format(job[j], i)
            web.get(url)
            ht = web.page_source
            # (removed unused `soup` local - jiexi() parses the HTML itself)
            jiexi(ht, info,job[j])
            print('岗位{}:{}/{}'.format(j,i,page_list[j]))
            time.sleep(2)  # polite delay between page requests
            save(info)
        time.sleep(3)
    cursor.close()
    # Close the database connection once all jobs are stored.
    db.close()
# 二.数据清洗
# 2.1清洗相关函数的设计
# pymysql is re-imported here (already imported at the top of the file).
import pymysql
# Module-level MySQL connection shared by the cleaning functions below.
# NOTE(review): credentials are hard-coded - move to config/env before reuse.
db = pymysql.connect(
host="localhost",
user="root", password="20010422f",
database="student", charset="utf8"
)
def pipei():
    """Return all rows from the `jobs` table whose first column is '软件设计'.

    Uses the module-level `db` connection; returns a list of row tuples.
    """
    cursor = db.cursor()
    cursor.execute("select * from jobs")
    results = cursor.fetchall()
    # FIX: the original nested check re-tested column 0 ("'软件' in each_result[0]"),
    # which is always true once column 0 == '软件设计' - a tautology, dropped here
    # with behavior unchanged.
    # NOTE(review): it probably meant to match the job *title* (column 1) - TODO confirm.
    after_pipei = [row for row in results if row[0] == '软件设计']
    cursor.close()
    return after_pipei
def split_city(data):
    """Return the rows as lists with column 5 truncated to the text before the first '-'.

    NOTE(review): the original comment calls column 5 the work-location column
    ("上海-浦东" -> "上海"), though the row layout built by jiexi() puts the
    location at index 6 - TODO confirm which schema the table actually has.
    """
    trimmed = []
    for record in data:
        record = list(record)
        # Keep only the part before the first dash (whole string if no dash).
        record[5] = record[5].partition('-')[0]
        trimmed.append(record)
    return trimmed
def salary(data):
    """Normalize the salary column (index 7) to 'X千 / 月' (thousand CNY per month).

    Rows whose salary is empty, hourly, or expressed as '...以下'/'...以上' are
    passed through unchanged (same filter as the original). Returns a new list
    of row lists.
    """
    after_salary = []
    for each_data in data:
        each_data = list(each_data)
        # NOTE(review): the hourly check reads column 6 while the salary text is
        # column 7 - looks inconsistent with the row layout; TODO confirm.
        if each_data[7] != '' and each_data[6][-1] != '时' and each_data[7][-3] != '下' and each_data[7][-4:-2] != '以下' and each_data[7][-3] != '上':
            if each_data[7][-1] == '年':
                # 'a-b万/年': mean of range * 10 (万->千) / 12 months == (a+b)*5/12
                each_data[7] = str(round((float(each_data[7].split('万')[0].split('-')[0]) + float(each_data[7].split('万')[0].split('-')[1])) * 5/12,1)) + '千 / 月'
            elif each_data[7][-1] == '天':
                # 'x元/天': x * 30 days, yuan -> thousands
                each_data[7] = str(round((float(each_data[7].split('元')[0]) * 30/1000),1)) +'千 / 月'
            elif each_data[7][-3] == '万':
                # 'a-b万/月': mean of range * 10 (万->千) == (a+b)*5
                each_data[7] = str(round((float(each_data[7].split('万')[0].split('-')[0]) + float(each_data[7].split('万')[0].split('-')[1])) * 5,1)) + '千/月'
            else:
                # 'a-b千/月': mean of the range.
                # FIX: the original computed a + b/2 (operator precedence bug);
                # the mean is (a + b) / 2, matching every other branch.
                low, high = each_data[7].split('千')[0].split('-')
                each_data[7] = str(round((float(low) + float(high)) / 2, 1)) + '千 / 月'
        after_salary.append(each_data)
    return after_salary

def job_attribute_text(data):
    """Trim the requirements column (index 8) to an 'experience,degree' form, in place.

    Splitting on ',' yields either 3 fields (keep the middle one; the comma's
    position marks whether it is an experience or a degree entry), 4 fields
    (keep the middle two), or anything else (blank out the column).
    Returns the same list object.
    """
    for row in data:
        parts = row[8].split(',')
        if len(parts) == 3:
            if ' 经验' in parts[1] or ' 在校生' in parts[1]:
                # Experience entry: trailing comma keeps the "experience," shape.
                row[8] = parts[1] + ','
            else:
                # Degree-only entry: leading comma keeps the ",degree" shape.
                row[8] = ',' + parts[1]
        elif len(parts) == 4:
            # Middle two fields are "experience,degree".
            row[8] = parts[1] + ',' + parts[2]
        else:
            row[8] = ''
    return data
#将清洗后的数据保存到数据库中 after_clean 表中，代码和保存爬取数据时类似
def save(data):
    """Insert cleaned rows into the `after_clean` table, then close the connection.

    NOTE(review): this shadows the crawling-stage save() defined earlier in the
    file, and db.close() at the end means it can only be called once per run -
    both behaviors are preserved because the cleaning __main__ relies on them.
    """
    cursor = db.cursor()
    # FIX: the SQL text never changes - build it once instead of per iteration.
    sql = "insert into after_clean(当前爬取岗位, 岗位,更新时间,公司名称 ,公司类型,公司规模,工作地点,薪资,工作要求,工作待遇) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    for each_data in data:
        # Rows are already in column order (see jiexi / the cleaning helpers) -
        # no need to unpack into ten locals and repack.
        cursor.execute(sql, list(each_data))
        db.commit()
    cursor.close()
    db.close()
# 2.2进行数据清洗
if __name__ == "__main__":
    # Step 2.2: run the cleaning pipeline end to end, then persist the result
    # into the after_clean table.
    rows = pipei()                       # keep only the matched job's rows
    rows = split_city(rows)              # city name only in the location column
    rows = salary(rows)                  # unify salary to thousand CNY / month
    rows = job_attribute_text(rows)      # trim requirements to experience,degree
    save(rows)                           # write out (also closes the connection)
# 3.7 岗位工作待遇热词词云图
#设计词频统计函数
# def wordcount(txt):
#     #转化为列表
#         # 统计词频的字典
#     word_freq = dict()
#     # 装载停用词,此处需将资料中给出的hit_stopwords.txt 文件放到本代码所在路径下
#     with open(r"D:\Users\yunmeng\PycharmProjects\小项目\大数据和上机二_数据可视化课程\相关文件\stopwords.txt", "r", encoding='utf-8') as f1:
#         # 读取我们的待处理本文
#         txt1 = f1.readlines()
#     stoplist = []
#     for line in txt1:
#         stoplist.append(line.strip('\n'))
#
#     #  切分、停用词过滤、统计词频
#     for w in list(jieba.cut(txt)):
#         if len(w) > 1 and w not in stoplist:
#             if w not in word_freq:
#                 word_freq[w] = 1
#             else:
#                 word_freq[w] = word_freq[w] + 1
#     return word_freq
# #连接数据库
# db = pymysql.connect(
#     host="localhost",
#     user="root", password="20010422f",
#     database="student", charset="utf8"
# )
# cursor = db.cursor()
# cursor.execute("SELECT `工作待遇` FROM `after_clean`")
# results = cursor.fetchall()
# txt = ''
# for each_result in results:
#     txt = txt + each_result[0]
# word_dict=wordcount(txt)
# da = pd.DataFrame({'word': word_dict.keys(), 'count': word_dict.values()})
# #将词频统计的结果导出
# da.to_csv(r'D:\Users\RK\PycharmProjects\小项目\大数据和上机二_数据可视化课程\代码文件\word_count.csv')
# #将导出的词频文件导入到tableau进行词云图的绘制