# -*- coding=utf-8 -*-
# @Time: 2023/6/21 14:48
# @File: zhilian.py

import requests
from lxml import etree
import numpy as np
import pandas as pd
import time

# 获取爬取的城市、职位和页码数
# def get_city_page(city, keys):
    # city = input('请输入要爬取的城市：')
    # keys = input('请输入要爬取的职位：')
    # return city, keys


# Build the list of search-result URLs to scrape.
def create_url_lis(city, keys, pages=1):
    """Build Zhaopin search URLs for *city* and keyword *keys*.

    Args:
        city: City name; must be a key of the city-code table below.
        keys: Search keyword (job title) inserted into the query string.
        pages: Number of result pages to build URLs for. Defaults to 1,
            which matches the original hard-coded behaviour.

    Returns:
        list[str]: One search URL per page, p=1..pages.

    Raises:
        KeyError: If *city* is not in the supported city table.
    """
    # Zhaopin's internal numeric codes for the supported cities.
    city_code_dict = {
        '上海': 538, '北京': 530, '广州': 763, '深圳': 765, '天津': 531, '武汉': 736, '西安': 854,
        '成都': 801, '南京': 635, '杭州': 653, '重庆': 551, '厦门': 682, '大连': 600, '全国': 489
    }
    city_code = city_code_dict[city]
    # Page numbers in the site's query string are 1-based.
    return [
        'https://sou.zhaopin.com/?jl={}&kw={}&p={}'.format(city_code, keys, p)
        for p in range(1, pages + 1)
    ]


# Fetch the raw HTML for one search-result URL.
def get_html(url):
    """GET *url* with browser-like headers and return the response body text.

    Args:
        url: A Zhaopin search-results URL (as built by create_url_lis).

    Returns:
        str: The decoded HTML of the response.

    Raises:
        requests.RequestException: On connection errors or timeout.
    """
    # NOTE(review): the cookie below is a captured session cookie and has
    # almost certainly expired — refresh it from a live browser session
    # before relying on the scraper.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.43',
        # The original literal was split across two physical lines, which is a
        # SyntaxError; implicit adjacent-string concatenation rejoins it.
        'cookie': ('SHOW_ATTACHMENT_RESUME_TIP=true; x-zp-client-id=72c5454d-7772-445c-9737-e138f12c18de; FSSBBIl1UgzbN7NO=5i_5.wcooagPGZ4RNm2ccPRgnfa87IDouCl8GOQJ_siVFUky7_Rb29b04AshBZwA7svnqR58Dv.tcb43KheEZFq; locationInfo_search={%22code%22:%22763%22%2C%22name%22:%22%E5%B9%BF%E5%B7%9E%22%2C%22message%22:%22%E5%8C%B9%E9%85%8D%E5%88%B0%E5%B8%82%E7%BA%A7%E7%BC%96%E7%A0%81%22}; _uab_collina=168473933203279359889708; sts_deviceid=1886695c869af7-016ca45de5adbd-26031a51-921600-1886695c86aa92; LastCity=%E5%B9%BF%E5%B7%9E; LastCity%5Fid=763; acw_tc=2760828c16873315190447221e392961c64ec276d50c60e41096011aa70283; Hm_lvt_38ba284938d5eddca645bb5e02a02006=1686813864,1686844178,1686900803,1687331520; selectCity_search=763; ssxmod_itna=Qqfx9DBD2DRBqDKGHD8Wba4Cu7mxgD277G2Gx0v4heiODUxn4iaDToPThQ2YbDhiqfsqQ6qpIzf7CnGtWiG+r4GLDm9Lxv8YxiiDC40rD74irDDxD3DbSPDS8xG=DjCU=lBExYPGWjqitLD7UZlqDj8gj0/qG0DDtDiu3DKT0uDGYL/Lpo1kGdaihuD0txxBd4QP+uClnWhAUdVQW/ioQDzMFDtdUekkbDCr6MmwNo7G4WiF4zii44W2qe/G4YK0EwLI+NYGx=WBwwkDh4Q7eFU7+nxDfiOKqxD=; ssxmod_itna2=Qqfx9DBD2DRBqDKGHD8Wba4Cu7mxgD277G2Dn9E25Ds8iqDLAQAPXR87qn+uQxd43+cqRQqjKlhA+p3n08eT4uGfG4TKaOcGo9QxO8lOB13KqaLghGadMgoDwxhPGcDYKexD; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2218842480e3d94c-0117cf1e214a608-26031a51-921600-18842480e3e1184%22%2C%22first_id%22%3A%22%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22identities%22%3A%22eyIkaWRlbnRpdHlfY29va2llX2lkIjoiMTg4NDI0ODBlM2Q5NGMtMDExN2NmMWUyMTRhNjA4LTI2MDMxYTUxLTkyMTYwMC0xODg0MjQ4MGUzZTExODQifQ%3D%3D%22%2C%22history_login_id%22%3A%7B%22name%22%3A%22%22%2C%22value%22%3A%22%22%7D%2C%22%24device_id%22%3A%2218842480e3d94c-0117cf1e214a608-26031a51-921600-18842480e3e1184%22%7D; Hm_lpvt_38ba284938d5eddca645bb5e02a02006=1687331701; '
                   'FSSBBIl1UgzbN7NP=5RxtOQC11L6WqqqDE6W_qZqdVXx4qlcfRkVO8pACHhFDPwMAimANsbRZZaE55HhdgPoh0K7cNVSL.lRdoseECEH1k4ijoDkR3YyzCYbOm852PAn6.uVtBUo.YD8AwHpQXOjNtDdXOU1AXAWG9cY1jDpLQQX0h_984wNBcIzcwORG1jH7lfDqeJz8fWQd8vkqjix9yepOZWwC0sHswGfbr5tf0OZkm2LW6MBTzaZ2XkfQHTU4eTLxmlWH9waZrodvg.79JV8Q3fjug3jx4AL8ZiF__P9iTzNaTte1dWYPDg8Tq')
    }
    # timeout prevents the scraper from hanging forever on a stalled socket.
    html = requests.get(url, headers=headers, timeout=30).text
    return html


# Parse one search-results page into a DataFrame.
def transform_html(response):
    """Parse a Zhaopin search-results HTML page into a pandas DataFrame.

    Extracts, per job card: job title, salary range, location, required
    experience, required education, welfare tags, company name, company
    type, company size, and the detail-page link.

    Args:
        response: The raw HTML text of one search-results page.

    Returns:
        pandas.DataFrame: One row per job listing, with Chinese column
        names matching the rest of the pipeline. Missing sub-fields are
        filled with np.nan.
    """
    html = etree.HTML(response)
    # Job titles.
    job = html.xpath('//span[@class="iteminfo__line1__jobname__name"]/@title')
    # Salary ranges; the site pads the text with whitespace/newlines.
    salary = [s.strip() for s in
              html.xpath('//p[@class="iteminfo__line2__jobdesc__salary"]/text()')]

    # Location / experience / education live in the same <ul>, in that order.
    # Query the items once per card (the original ran the same XPath three
    # times) and use length checks instead of bare except clauses, which
    # also swallowed KeyboardInterrupt/SystemExit.
    location, experience, education = [], [], []
    for req in html.xpath('//ul[@class="iteminfo__line2__jobdesc__demand"]'):
        items = req.xpath('.//li[@class="iteminfo__line2__jobdesc__demand__item"]/text()')
        location.append(items[0] if len(items) > 0 else np.nan)
        experience.append(items[1] if len(items) > 1 else np.nan)
        education.append(items[2] if len(items) > 2 else np.nan)

    # Welfare tags: stored as the str() of the tag list, as downstream
    # consumers of the CSV expect (preserved from the original code).
    job_tag = [
        str(tag.xpath('.//div[@class="iteminfo__line3__welfare__item"]/text()'))
        for tag in html.xpath('//div[@class="iteminfo__line3__welfare"]')
    ]

    # Company names.
    company_name = html.xpath('//span[@class="iteminfo__line1__compname__name"]/text()')

    # Company type and size share one description <div>, in that order.
    company_type, company_size = [], []
    for company in html.xpath('//div[@class="iteminfo__line2__compdesc"]'):
        items = company.xpath('.//span[@class="iteminfo__line2__compdesc__item"]/text()')
        company_type.append(items[0] if len(items) > 0 else np.nan)
        company_size.append(items[1] if len(items) > 1 else np.nan)

    # Detail-page links.
    job_href = html.xpath('//a[@class="joblist-box__iteminfo iteminfo"]/@href')

    # Build the DataFrame in one shot instead of assigning column by column.
    return pd.DataFrame({
        '职位名称': job,
        '薪资范围': salary,
        '地点': location,
        '工作经验': experience,
        '学历要求': education,
        '岗位标签': job_tag,
        '公司名称': company_name,
        '公司类型': company_type,
        '公司规模': company_size,
        '详情页链接': job_href,
    })


# Scrape every page in url_lis and combine the results into one DataFrame.
def concat_data(url_lis):
    """Fetch and parse each URL, concatenating all pages into one DataFrame.

    Args:
        url_lis: List of search-results URLs (from create_url_lis).

    Returns:
        pandas.DataFrame: All pages' rows concatenated with a fresh index;
        an empty DataFrame when no page was scraped successfully.
    """
    # Collect one DataFrame per successfully scraped page.
    page_frames = []
    for num, url in enumerate(url_lis, start=1):
        try:
            print('开始爬取第{}页'.format(num))
            # Fetch the page's HTML source.
            response = get_html(url)
            # Parse it into a per-page DataFrame.
            page_frames.append(transform_html(response))
            print('第{}页爬取完成'.format(num))
            # Sleep 10 seconds between pages to avoid hammering the site.
            time.sleep(10)
        except Exception as err:
            # Narrowed from a bare except (which also hid Ctrl-C) and the
            # misleading "all pages done" success message on every failure.
            print('第{}页爬取失败：{}'.format(num, err))
    if not page_frames:
        # Nothing scraped — return an empty frame instead of letting
        # pd.concat raise ValueError on an empty list.
        return pd.DataFrame()
    return pd.concat(page_frames, ignore_index=True)


# Save the scraped results to a CSV file in the current directory.
def save_df(df, city):
    """Write *df* to '<city>招聘信息.csv' in the working directory.

    Args:
        df: The combined job-listings DataFrame.
        city: City name used in the output file name.

    Returns:
        None.
    """
    file_name = '{}招聘信息.csv'.format(city)
    # Use the bare relative file name: the original r'.\{}' prefix is
    # Windows-only — on POSIX the backslash becomes part of the file name.
    # utf-8-sig adds a BOM so Excel opens the Chinese text correctly.
    df.to_csv(file_name, encoding='utf-8-sig', index=False)
    print('{}保存成功'.format(file_name))
    return


# Entry point: build the URL list, scrape every page, then save the result.
def main(city, keys):
    """Run the full scraping pipeline for one city/keyword pair.

    Args:
        city: City name (must be supported by create_url_lis).
        keys: Search keyword (job title).

    Returns:
        None.
    """
    page_urls = create_url_lis(city, keys)
    all_jobs = concat_data(page_urls)
    save_df(all_jobs, city)
    print('所有程序执行完毕')