import requests
from lxml import etree
import re
import os
import pandas as pd
import time
# 58.com keeps rotating its anti-scraping measures (different content types
# trigger different countermeasures), so every request goes out through a
# pool of proxy IPs loaded from a spreadsheet.
df = pd.read_excel('过滤IP.xlsx')
ip_ls = df['代理IP'].tolist()


def get_response(url, headers):
    """Fetch *url* through rotating proxies until a non-captcha page arrives.

    Each attempt consumes one IP from the module-level ``ip_ls`` pool (the
    pool is deliberately burned down: a blocked or throttled proxy is never
    reused).  A response is accepted only when it does not contain 58.com's
    rate-limit captcha marker.

    :param url: absolute URL to fetch
    :param headers: request headers (user-agent) passed straight to requests
    :return: page HTML as a string, or ``None`` once every proxy has been
             tried without success (callers already test for ``None``)
    """
    while ip_ls:
        ip = ip_ls.pop(0)  # consume the next proxy from the head of the pool
        print('当前IP=', ip)
        proxy = {'https': ip}
        try:
            # timeout keeps a dead proxy from hanging the crawl indefinitely
            response = requests.get(url, headers=headers, proxies=proxy, timeout=10)
        except requests.RequestException:
            continue  # unreachable/blocked proxy: move on to the next one
        html = response.text
        # 58.com serves a captcha interstitial instead of content when throttled
        if not re.findall('访问过于频繁，本次访问做以下验证码校验', html, re.S):
            return html
    return None  # proxy pool exhausted without a usable response


url0 = 'https://yongkang.58.com/job.shtml?PGTID=0d100000-0021-9dfe-0036-8d0574f32218&ClickID=2'
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}


def _squeeze(parts):
    """Join xpath text fragments and strip every whitespace character."""
    return re.sub(r'\s', '', ''.join(parts))


def _parse_job(job_xml):
    """Extract one posting's fields from a parsed job-detail page.

    Keys stay in Chinese because they become the Excel column headers.
    Raises IndexError when a required field is absent from the page
    (caller skips that posting instead of crashing the whole crawl).
    """
    dic = dict()
    dic['更新时间'] = job_xml.xpath('//span[@class="pos_base_num pos_base_update"]/span/text()')[0]
    dic['浏览人数'] = job_xml.xpath('//span[@class="pos_base_num pos_base_browser"]/i/text()')[0]
    dic['申请人数'] = job_xml.xpath('//span[@class="pos_base_num pos_base_apply"]/span/text()')[0]
    dic['标题'] = job_xml.xpath('//span[@class="pos_name"]/text()')[0]
    dic['福利'] = job_xml.xpath('//div[@class="pos_welfare"]/span/text()')
    dic['招人数'] = _squeeze(job_xml.xpath('//div[@class="pos_base_condition"]/span[1]/text()')[0])
    dic['学历'] = job_xml.xpath('//div[@class="pos_base_condition"]/span[2]/text()')[0]
    dic['经验'] = _squeeze(job_xml.xpath('//div[@class="pos_base_condition"]/span[3]/text()')[0])
    dic['地点'] = job_xml.xpath('//div[@class="pos-area"]/span[2]/text()')[0]
    dic['职位描述'] = _squeeze(job_xml.xpath('//div[@class="des"]/text()'))
    dic['公司名称'] = job_xml.xpath('//div[@class="baseInfo_link"]/a/text()')[0]
    dic['公司类别'] = job_xml.xpath('//p[@class="comp_baseInfo_belong"]/a/text()')[0]
    dic['公司规模'] = job_xml.xpath('//p[@class="comp_baseInfo_scale"]/text()')[0]
    dic['公司介绍'] = _squeeze(job_xml.xpath('//div[@class="txt"]//text()'))
    return dic


html = get_response(url0, headers)  # first-level request: the job category index page
xml = etree.HTML(html)
classifys = xml.xpath('//div[@id="sidebar-right"]/ul')
# First-level job categories
for classify in classifys:
    clas = classify.xpath('./li')
    # Second-level job categories
    for c in clas:
        big_clas = c.xpath('./strong/a/text()')[0]
        big_clas = big_clas.replace('/', '-')   # '/' is illegal in a directory name
        os.makedirs(big_clas, exist_ok=True)    # race-free "create if missing"
        lit_clas = [(i, 'https://yongkang.58.com' + j)
                    for i, j in zip(c.xpath('./a/text()'), c.xpath('./a/@href'))]
        # Third-level job categories
        for name, link in lit_clas:
            name = name.replace('/', '-')
            each_html = get_response(link, headers)
            if not each_html:
                continue
            each_xml = etree.HTML(each_html)
            pages = each_xml.xpath('//i[@class="total_page"]/text()')
            total_page = int(pages[0]) if pages else 1  # no pager widget => single page
            # Every posting's detail dict for this third-level category
            job_data = []
            for page in range(total_page):      # pagination: .../pn2/, .../pn3/, ...
                if page > 0:
                    each_html = get_response(link + f'pn{page+1}/', headers)
                    if not each_html:
                        continue
                    each_xml = etree.HTML(each_html)
                for job in each_xml.xpath('//li[@class="job_item clearfix"]'):
                    hrefs = job.xpath('.//a/@href')
                    if not hrefs:
                        continue
                    job_html = get_response(hrefs[0], headers)
                    if not job_html:
                        continue
                    job_xml = etree.HTML(job_html)
                    try:
                        job_data.append(_parse_job(job_xml))
                    except IndexError:
                        continue  # atypical page layout: skip this posting only
                    time.sleep(0.5)  # throttle to reduce the chance of a ban
            df_save = pd.DataFrame(job_data)
            # os.path.join keeps the output path portable (was hard-coded '\\')
            df_save.to_excel(os.path.join(big_clas, name + '.xlsx'), index=False)
            print('已存完小类', name)
        print('已存完大类', big_clas)
