import requests
import re
import urllib3
from fake_useragent import UserAgent
from   lxml import  html
etree = html.etree
from multiprocessing import Pool
import  time
import random
import os


def get_response(url):
    """GET *url* disguised as a desktop browser and return the Response.

    A random User-Agent is rolled on every call.  TLS verification is
    disabled (``verify=False``), so urllib3's insecure-request warning is
    silenced first.  On any request error the exception is printed and the
    last successful global ``response`` is returned instead — this keeps
    the original best-effort behaviour (the very first failure therefore
    raises NameError in the caller).
    """
    header = {
        'User-Agent': str(UserAgent().random),
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'Accept-Encoding':'gzip, deflate, br',
        'Accept-Language':'zh-CN,zh;q=0.9',
        'Cache-Control':'max-age=0',
        'Connection': 'keep-alive',
        'Referer':'https://www.liepin.com/zhaopin/?init=-1&headckid=0f6cdf2f0d0a53ce&dqs=&fromSearchBtn=2&imscid=R000000035&ckid=86b74561064498c2&degradeFlag=0&key=%E7%BD%91%E9%A1%B5%E8%AE%BE%E8%AE%A1%E5%B8%88&siTag=eixLkHE9-D9NGnHPldlDBQ%7EfA9rXquZc5IkJpXC-Ycixw&d_sfrom=search_unknown&d_ckId=8a47eb578cccf0a33f458e3dbaedc5e1&d_curPage=0&d_pageSize=40&d_headId=a3aed0e027eb7af9650964c973513af4&curPage=1',
        'Upgrade-Insecure-Requests':'1',
        'Cookie':'abtest=0; __uuid=1584454454459.09; JSESSIONID=D3F07D0FC4E9E7AA30EE5F5A7D0128B9; __tlog=1584586689080.93%7CR000000035%7CR000000035%7C00000000%7C00000000; __session_seq=1; __uv_seq=1; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1584454455,1584499900,1584586689; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1584586689',
        'Host':'www.liepin.com'
    }
    # Removed dead code from the original: an unused requests.session(),
    # an ineffective ``requests.DEFAULT_RETRIES`` assignment, and a
    # ``proxies`` dict that was built but never passed to the request.
    try:
        global response
        urllib3.disable_warnings()
        # BUG FIX: the headers dict used to be passed positionally, which
        # requests.get() treats as the ``params`` query-string argument —
        # the browser headers and cookie were never actually sent.
        response = requests.get(url, headers=header, timeout=5, verify=False)
    except Exception as e:
        print('timeout?', str(e))
    return response

def get_Skill_url_list(url):
    """Walk liepin.com's category tree and farm every job URL out to a Pool.

    Starting from the channel page at *url*, this drills down:
    top-level category -> sub-category -> skill (e.g. Java) -> result
    pages -> individual job postings.  For every skill it collects all
    job-detail URLs (tagged with their three category titles) and hands
    them to a multiprocessing Pool of get_all_imformation workers.
    """
    # Level 1: the four top categories (tech / product / design / operations).
    for j in range(1,5):
        # Debug aid for anti-crawler testing: dump the raw page.
        # print(get_response(url).text)
        selector = etree.HTML(get_response(url).text)
        # One top-level category title, e.g. "technology".
        first_title=selector.xpath('//*[@id="subsite"]/div[1]/div[1]/ul/li[%d]/div/h2/text()'%j)[0]
        # Number of second-level groups under it (development, QA, ops, ...).
        second_title_length=len(selector.xpath('//*[@id="subsite"]/div[1]/div[1]/ul/li[%d]/dl/dt/text()'%j))

        # Level 2: each sub-category inside the current top category.
        for i in range(1,second_title_length+1):
            # One sub-category title.
            second_title=selector.xpath('//*[@id="subsite"]/div[1]/div[1]/ul/li[%d]/dl/dt[%d]/text()'%(j,i))[0]
            # Level-3 skill names (Java, PHP, Python, ...) and their links.
            third_title_list=selector.xpath('//*[@id="subsite"]/div[1]/div[1]/ul/li[%d]/dl/dd[%d]/a/text()'%(j,i))
            third_url_list=selector.xpath('//*[@id="subsite"]/div[1]/div[1]/ul/li[%d]/dl/dd[%d]/a/@href'%(j,i))

            # Level 3: each skill.  NOTE(review): the start index 3 is a
            # hard-coded "resume point" left from restarting a crashed run —
            # it silently skips the first three skills of every group.
            for k in range(3,len(third_title_list)):
                time.sleep(random.randint(0,2))  # soften the request rate
                third_title=third_title_list[k]
                # Build the absolute skill-listing URL, stripping filter params.
                Skill_work_url='https://www.liepin.com'+str(third_url_list[k]).replace("&dqs=050080","").replace('imscid=R000000035&','')
                # Fetch the skill's first result page.
                Skill_work_html = get_response(Skill_work_url)
                # print(Skill_work_html.text)
                # Scrape the highest page number out of the "last page" link.
                # NOTE(review): if the link is absent this yields '[]' and the
                # int() below raises ValueError — assumed never to happen.
                Page_num_max = str(re.findall('&curPage=(.*?)" title="末页"></', Skill_work_html.text, re.S)).split("=")[-1].replace("']", "")

                # All tagged job URLs of the current skill, for the Pool below.
                multiprocessing_list = []
                # Level 4: iterate over every result page of the skill.
                for l in range(0, int(Page_num_max) + 1):
                    time.sleep(random.randint(0, 3))
                    # URL of result page *l* for the current skill.
                    Skill_work_page_url = Skill_work_url + "&curPage=%d" % l

                    print(Skill_work_page_url)
                    # Parse the page and pull every job-posting link on it.
                    each_page_work_selector = etree.HTML(get_response(Skill_work_page_url).text)
                    each_work_url_list = each_page_work_selector.xpath('//*[@class="sojob-item-main clearfix"]/div[1]/h3/a/@href')

                    # Level 5: tag each job URL with its three category titles.
                    for o in range(0, len(each_work_url_list)):
                        each_work_url = each_work_url_list[o]
                        # "level1|level2|level3|job_url" — the exact format
                        # that get_all_imformation expects to split.
                        eeach_work_title=first_title+'|'+second_title+'|'+third_title+'|'+each_work_url
                        multiprocessing_list.append(eeach_work_title)

                # Fan the collected job URLs out to 18 worker processes.
                pool = Pool(processes=18)
                # multiprocessing_list items look like "tech|dev|java|url".
                pool.map(get_all_imformation, multiprocessing_list)
                # Stop the pool from accepting new tasks...
                pool.close()
                # ...then block until every worker has finished.
                pool.join()

def _xpath_first(selector, path, default='无'):
    """Return the first node matched by *path*, or *default* when absent."""
    try:
        return selector.xpath(path)[0]
    except IndexError:
        return default


def get_all_imformation(all_title_url):
    """Worker: scrape one job-detail page and persist it via save_information.

    *all_title_url* has the form "level1|level2|level3|job_url", as built by
    get_Skill_url_list.  Runs inside a multiprocessing Pool, hence the random
    sleep to spread the request rate.  Mandatory page fields raise IndexError
    (aborting this record) when missing; optional ones fall back to '无'.
    """
    time.sleep(random.randint(0, 10))
    # maxsplit=3 keeps a '|' inside the URL from truncating it.
    first_title, second_title, third_title, Skill_work_url = str(all_title_url).split('|', 3)
    # Relative links such as "/job/123.shtml" need the site prefix.
    if len(Skill_work_url.split("/")) <= 3:
        Skill_work_url = 'https://www.liepin.com' + Skill_work_url
    eawork_selector = etree.HTML(get_response(Skill_work_url).text)

    # --- mandatory fields: IndexError here deliberately aborts the record ---
    work_name = eawork_selector.xpath('//*[@class="title-info"]/h1/text()')[0]
    company_name = eawork_selector.xpath('//*[@class="title-info"]/h3/a/text()')[0]
    company_worker_num = eawork_selector.xpath('//*[@class="new-compintro"]/li[2]/text()')[0]
    company_industry = eawork_selector.xpath('//*[@class="new-compintro"]/li[1]/a/text()')[0]
    # Strip literal escape sequences and whitespace left by the page markup.
    salary_num = str(eawork_selector.xpath('//*[@class="job-item-title"]/text()')[0]).replace("\\r","").replace("\\n",
                                             "").replace("\\t", "").replace("', '", "").replace(" ", "").strip()
    work_city = eawork_selector.xpath('//*[@class="basic-infor"]/span/a/text()')[0]
    diploma_required = eawork_selector.xpath('//*[@class="job-qualifications"]/span[1]/text()')[0]
    experience_requird = eawork_selector.xpath('//*[@class="job-qualifications"]/span[2]/text()')[0]
    language_required = eawork_selector.xpath('//*[@class="job-qualifications"]/span[3]/text()')[0]
    age_required = eawork_selector.xpath('//*[@class="job-qualifications"]/span[4]/text()')[0]
    work_description = str(eawork_selector.xpath('//*[@class="job-item main-message job-description"]/div/text()')).replace("'",'').replace(
                        ",", "").replace("\\r", "").replace("\\n", "").replace(" ", "").replace("[", "").replace("]","").strip()

    # --- optional fields: missing nodes default to '无' ("none") ---
    departments_name = _xpath_first(eawork_selector, '//*[@class="about-position"]/div[5]/div/ul/li[1]/label/text()')
    major_required = _xpath_first(eawork_selector, '//*[@class="about-position"]/div[5]/div/ul/li[2]/label/text()')
    report_departments = _xpath_first(eawork_selector, '//*[@class="about-position"]/div[5]/div/ul/li[3]/label/text()')
    subordinates_num = _xpath_first(eawork_selector, '//*[@class="about-position"]/div[5]/div/ul/li[4]/label/text()')

    print('当前一级分类:', first_title, "。二级分类:", second_title, '。三级分类：', third_title,"岗位名称:",work_name)
    # "level1|level2|level3|<15 comma-separated record fields>" — the format
    # save_information splits on.
    fianl_information = '|'.join([first_title, second_title, third_title,
                                  ','.join([work_name, company_name, company_worker_num,
                                            company_industry, salary_num, work_city,
                                            diploma_required, experience_requird,
                                            language_required, age_required,
                                            work_description, departments_name,
                                            major_required, report_departments,
                                            subordinates_num])])
    save_information(fianl_information)

def save_information(fianl_information):
    """Append one scraped job record to its per-category CSV file.

    *fianl_information* has the form "level1|level2|level3|record", where
    *record* is one comma-separated line of 15 job fields.  The record is
    appended to D:\\猎聘爬虫\\<level1>\\<level2>\\<level3>.csv, creating the
    directories on demand.  NOTE(review): the Windows-only absolute path is
    kept from the original.
    """
    # maxsplit=3 keeps a '|' inside the record itself from truncating it.
    information_list = str(fianl_information).split("|", 3)
    first_title = information_list[0]
    second_title = information_list[1]
    third_title = information_list[2]
    information = str(information_list[3])
    dir_path = 'D:\\猎聘爬虫\\' + first_title + '\\' + second_title
    file_path = dir_path + '\\' + third_title + '.csv'
    os.makedirs(dir_path, exist_ok=True)
    try:
        # BUG FIX: write explicitly as UTF-8.  The original used the platform
        # default encoding (gbk on Chinese Windows) and silently dropped every
        # record that raised UnicodeEncodeError.
        with open(file_path, mode="a+", encoding="utf-8") as f:
            f.write(information)
            f.write('\n')
    except UnicodeEncodeError as e:
        # Nearly impossible with UTF-8 (lone surrogates only); keep the
        # original skip-the-record policy, but say so instead of hiding it.
        print('skipped unencodable record:', str(e))

if __name__ == '__main__':
    # Entry point: crawl the whole liepin.com IT category tree.
    start_url = "https://www.liepin.com/it/"
    get_Skill_url_list(start_url)

