# coding:utf-8
import requests
from bs4 import BeautifulSoup
import urllib.parse
import urllib.request
from urllib.parse import quote
import string
# project-local database connection helper
from sc.ZDao import connect_mysql
import time
import random

#
def getChartsData(areaName, jobName):
    """Return the education-level distribution of stored jobs for a city/keyword.

    Runs a GROUP BY query over the ``jobs`` table and returns
    ``{"edus": [...], "counts": [...]}`` (parallel lists of education level
    and matching row count), or ``None`` if the query fails.

    :param areaName: substring matched against ``job_loc``
    :param jobName: substring matched against ``job_name``
    """
    db = connect_mysql()
    cur = db.cursor()
    # Parameterized query: the previous version concatenated user input
    # directly into the SQL string, which was vulnerable to SQL injection.
    sql = ("select job_edu,count(*) from jobs "
           "where job_loc like %s and job_name like %s group by job_edu")
    params = ("%" + areaName + "%", "%" + jobName + "%")
    edus = []
    counts = []
    try:
        cur.execute(sql, params)
        for job_edu, count in cur.fetchall():
            edus.append(job_edu)
            counts.append(count)
        return {"edus": edus, "counts": counts}
    except Exception as e:
        # On failure roll back (read-only query, but mirrors the file's
        # error-handling convention) and fall through to return None.
        print(f"查询失败: {e}")
        db.rollback()
    finally:
        # The original leaked both the cursor and the connection.
        cur.close()
        db.close()

# Scrape one page of job listings and store them in the database.
# City name -> Zhaopin numeric city code.  To support more cities, read the
# code out of the search URL, e.g. https://sou.zhaopin.com/?jl=691&kw=java&p=1
CITY_CODES = {
    '北京': '530',
    '上海': '538',
    '广州': '763',
    '深圳': '765',
    '天津': '531',
    '武汉': '736',
    '西安': '854',
    '成都': '801',
    '沈阳': '599',
    '南京': '635',
    '杭州': '653',
    '苏州': '639',
    '重庆': '551',
    '长沙': '749',
    '厦门': '682',
    '南昌': '691',
}

# Desktop User-Agents rotated per request to look less bot-like.
USER_AGENT = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
]


def _fetch_search_page(city_code, keyword):
    """Download the first Zhaopin search-result page and return its HTML.

    :param city_code: Zhaopin numeric city code (a value from CITY_CODES)
    :param keyword: job keyword, e.g. 'java'
    """
    url = 'https://sou.zhaopin.com/?jl=' + city_code + '&kw=' + keyword + '&p=1'
    # Percent-encode non-ASCII characters (e.g. a Chinese keyword) while
    # leaving the URL's structural characters intact.
    url = urllib.parse.quote(url, safe=string.printable)
    header = {"User-Agent": random.choice(USER_AGENT),
              "Referer": "https://sou.zhaopin.com",
              "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
              "Cookie": "x-zp-client-id=de8c355a-96b7-4ec3-bf94-f3a19574b75b; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2219038b23783a54-0d7bae3fa19b49-4c657b58-1638720-19038b2378411ec%22%2C%22first_id%22%3A%22%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22identities%22%3A%22eyIkaWRlbnRpdHlfY29va2llX2lkIjoiMTkwMzhiMjM3ODNhNTQtMGQ3YmFlM2ZhMTliNDktNGM2NTdiNTgtMTYzODcyMC0xOTAzOGIyMzc4NDExZWMifQ%3D%3D%22%2C%22history_login_id%22%3A%7B%22name%22%3A%22%22%2C%22value%22%3A%22%22%7D%2C%22%24device_id%22%3A%2219038b23783a54-0d7bae3fa19b49-4c657b58-1638720-19038b2378411ec%22%7D; Hm_lvt_ae16410ef779c428ebd5bd7bf2fd155e=1719370998; sensorsdata2015jssdkchannel=%7B%22prop%22%3A%7B%22_sa_channel_landing_url%22%3A%22%22%7D%7D; selectCity_search=736; Hm_lvt_08e585d395455886ebe17d4b393b6523=1745151606; Hm_lvt_7fa4effa4233f03d11c7e2c710749600=1744009761,1745151394,1745158739; Hm_lpvt_7fa4effa4233f03d11c7e2c710749600=1745158739; HMACCOUNT=76F8A742B49BD472; locationInfo_search={%22code%22:%22694%22%2C%22name%22:%22%E4%B9%9D%E6%B1%9F%22%2C%22message%22:%22%E5%8C%B9%E9%85%8D%E5%88%B0%E5%B8%82%E7%BA%A7%E7%BC%96%E7%A0%81%22}; LastCity=%E4%B9%9D%E6%B1%9F; LastCity%5Fid=694"
              }
    # Anti-bot measures: explicit (empty) proxy handler + randomized delay
    # between requests to reduce the chance of being blocked.
    proxy_handler = urllib.request.ProxyHandler({})
    opener = urllib.request.build_opener(proxy_handler)
    req = urllib.request.Request(url, headers=header)
    time.sleep(random.uniform(1, 3))
    with opener.open(req, timeout=10) as res:
        return res.read().decode('utf-8')


def _parse_jobs(content):
    """Extract job records from the search-page HTML.

    :param content: decoded HTML of one result page
    :returns: list of dicts with keys job_name, company_name, job_salary,
        job_loc, job_exp, job_edu
    """
    soup = BeautifulSoup(content, 'lxml')
    job_list = []
    for job in soup.select(".joblist-box__item"):
        names = job.select('.jobinfo__name')
        companies = job.select('.companyinfo__name')
        salaries = job.select('.jobinfo__salary')
        others = job.select('.jobinfo__other-info-item')
        # Guard: the original indexed [0]/[1]/[2] unconditionally and crashed
        # with IndexError on cards missing any field (ads, banners).
        if not names or not companies or not salaries or len(others) < 3:
            continue
        salary = salaries[0].get_text().strip().replace(' ', '').replace('\n', '')
        job_list.append({
            'job_name': names[0].get_text(),
            'company_name': companies[0].get_text(),
            'job_salary': salary,
            # keep only the city part of "城市-区县"
            'job_loc': others[0].get_text().split("-")[0],
            'job_exp': others[1].get_text(),
            'job_edu': others[2].get_text(),
        })
    return job_list


def _save_jobs(job_list):
    """Batch-insert parsed job dicts into the ``jobs`` table."""
    db = connect_mysql()
    if not db:
        print("无法连接到数据库")
        return
    cursor = db.cursor()
    try:
        # jid is auto-generated, hence the leading null.
        sql = "INSERT INTO jobs(jid,job_name,company_name,job_sal,job_loc,job_exp,job_edu) VALUES(null,%s,%s,%s,%s,%s,%s)"
        data = [(i["job_name"], i["company_name"], i["job_salary"],
                 i["job_loc"], i["job_exp"], i["job_edu"]) for i in job_list]
        cursor.executemany(sql, data)
        db.commit()
        print("数据插入成功")
    except Exception as e:
        print(f"插入数据失败: {e}")
        db.rollback()
    finally:
        cursor.close()
        db.close()


def run():
    """Interactively scrape one page of Zhaopin job listings and store them."""
    city_name = input("请输入城市")
    city_code = CITY_CODES.get(city_name)
    # Fix: the original did city[jName] and crashed with a KeyError for any
    # city not in the mapping.
    if city_code is None:
        print("不支持的城市: " + city_name)
        return
    keyword = input("请输入岗位")
    content = _fetch_search_page(city_code, keyword)
    _save_jobs(_parse_jobs(content))
    print("爬取数据成功")




run();