#基本思路是：
#先完成列表抓取，再分别抓详情
#1.识别每页网页地址，作为大的循环（提点：<a>标签抓取，直到<a>标签href='javascript'截止）
#2.层层提取，直到获得详情url地址
#3.得到详情url地址后，再次使用requests请求
#4.进入到详情页面截取步骤---获得元素代码，并且提取
#整体上，可以分两块来写，列表截取一块，详情提取一块
#步骤：
#首页网址url='https://www.liepin.com/zhaopin/?isAnalysis=&dqs=&pubTime=&salary=&subIndustry=&industryType=&compscale=&key=python&init=-1&searchType=1&headckid=bea8071f1d80209f&compkind=&fromSearchBtn=2&sortFlag=15&ckid=bea8071f1d80209f&degradeFlag=0&jobKind=&industries=&clean_condition=&siTag=I-7rQ0e90mv8a37po7dV3Q~fA9rXquZc5IkJpXC-Ycixw&d_sfrom=search_prime&d_ckId=da72f14aa0268cb48115fcc4ed581289&d_curPage=9&d_pageSize=40&d_headId=da72f14aa0268cb48115fcc4ed581289&curPage=0'
#难度是如何获取下一页列表？上面提点讲到，还需要利用while循环知道href出现'javascript'为止
#获取findall所有<div>,class='job-info'标签元素列表，然后列表提取所有<a>标签的href，存入新列表中（这里为了有效识别，可以将title也提取出来）
#上面已经获得了所有python招聘职位及详情连接
#下面获取详情信息，可以参考上述方法，bs4网页解析
#最后，将获取到的详情信息，依次存入csv文档中

import requests, time
import csv
from bs4 import BeautifulSoup

def job(url):
    """Crawl the Liepin job-search result pages starting at *url*.

    Follows the "next page" link until the site reports no further page
    (the pager's href degrades to a ``javascript`` pseudo-link).

    Returns a list of ``[title, detail_url]`` pairs; on any request or
    parsing error the pairs collected so far are returned (best effort).
    """
    h = {'cookie': 'abtest=0; __uuid=1584873812968.15; __tlog=1584873812991.95%7C00000000%7C00000000%7Cs_00_010%7Cs_00_010; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1584873818; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1584873845; __session_seq=5; __uv_seq=5',
         # BUG FIX: the original key was misspelled 'use-agent', so no
         # User-Agent header was ever sent with the request.
         'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'}
    lst_total = []
    try:
        while url:
            time.sleep(1)   # throttle requests so the server does not flag us
            r = requests.get(url, headers=h)
            soup = BeautifulSoup(r.text, 'lxml')
            # Each result card lives in a <div class="job-info">.
            for card in soup.find_all('div', class_='job-info'):
                title_detail = card.find('h3').get_text(strip=True)   # job title
                r_href = card.select('a')[0]['href']   # detail-page link
                # Links come in two formats: absolute and site-relative;
                # normalise everything to an absolute URL.
                if 'https://www.liepin.com' not in r_href:
                    r_href = 'https://www.liepin.com' + r_href
                lst_total.append([title_detail, r_href])
            # Locate the "next page" link inside the pager bar.
            page_url = soup.find('div', class_='pagerbar')
            next_href = page_url.select('a')[-3]['href']
            # BUG FIX: the original compared the *absolute* url against the
            # bare string 'javascript', which could never match, so the loop
            # only ever ended via an exception on the malformed URL.  Test
            # the raw href before following it instead.
            if 'javascript' in next_href:
                break   # last page reached
            url = 'https://www.liepin.com' + next_href
    except Exception:
        # Best effort: keep whatever was scraped before the failure.
        pass
    return lst_total

#开始把获取到的详情带入到lst_total
def description(x):
    """Fetch the full job description for every ``[title, url]`` pair in *x*.

    Appends the description text to each record in place and returns the
    list of completed records.  Pages missing the description element are
    skipped; on a request error the records completed so far are returned.
    """
    # Headers are constant — build them once, not on every loop iteration.
    h = {'cookie': '__uuid=1587565242923.41; lt_auth=u%2BZbbyZQyF%2BqtXeMjDYItf1K29v8AjmY8yhbhh5UhtC4XqLr4PrjRAKAqLAAxAMhkxkmcsULNLj5MO77y3tN70sWwGmul4CxvfOk0n0eTuNkHuyflMXuqs7QQJslrXg6ykpgn2si; user_roles=0; user_photo=5d5513d34ebeb1284dfc774b07u.png; user_name=%E5%B0%A4%E6%B9%A7; need_bind_tel=false; new_user=false; c_flag=e3d48cf9b6718286c11e15eff7b44f2a; gr_user_id=d71e94b2-ce1f-46ab-9ca7-a3097197a490; bad1b2d9162fab1f80dde1897f7a2972_gr_last_sent_cs1=3b40966970f4770f1a667e00ad0d9493; imClientId=09207f2d57c179d416b5a8e3027c3e07; imId=09207f2d57c179d4b506c240d1084464; imClientId_0=09207f2d57c179d416b5a8e3027c3e07; imId_0=09207f2d57c179d4b506c240d1084464; grwng_uid=8d87db7a-11f7-4d94-aaaf-f57e3dfe2c03; __s_bid=cbcb9c7f3b2b84a4e7cde6372d3d904567c0; fe_se=-1587565293835; __tlog=1587565242926.11%7C00000000%7CR000000075%7Cs_00_pz0%7Cs_00_pz0; fe_im_socketSequence_0=19_13_13; bad1b2d9162fab1f80dde1897f7a2972_gr_cs1=3b40966970f4770f1a667e00ad0d9493; abtest=1; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1588254775,1588256349,1588256354,1588256735; _fecdn_=1; JSESSIONID=42BBEE874121FE6EE5FC9864068582F4; __session_seq=47; __uv_seq=8; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1588405086',
         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
    lst_total_1 = []
    try:
        for record in x:
            url_1 = str(record[1])   # detail-page URL produced by job()
            time.sleep(2)   # throttle so the server does not block us
            r_1 = requests.get(url_1, headers=h)
            soup_1 = BeautifulSoup(r_1.text, 'lxml')
            node = soup_1.find('div', class_='content content-word')
            # BUG FIX: originally a missing description element raised an
            # AttributeError that aborted *all* remaining records via the
            # bare except; skip just this one page instead.
            if node is None:
                continue
            record.append(node.get_text(strip=True))
            lst_total_1.append(record)
    except Exception:
        # Best effort: return whatever was completed before the failure.
        pass
    return lst_total_1
    
# <--- main program: scrape listing pages, enrich with details, save CSV --->
url = 'https://www.liepin.com/zhaopin/?isAnalysis=&dqs=&pubTime=&salary=&subIndustry=&industryType=&compscale=&key=python&init=-1&searchType=1&headckid=bea8071f1d80209f&compkind=&fromSearchBtn=2&sortFlag=15&ckid=bea8071f1d80209f&degradeFlag=0&jobKind=&industries=&clean_condition=&siTag=I-7rQ0e90mv8a37po7dV3Q~fA9rXquZc5IkJpXC-Ycixw&d_sfrom=search_prime&d_ckId=da72f14aa0268cb48115fcc4ed581289&d_curPage=9&d_pageSize=40&d_headId=da72f14aa0268cb48115fcc4ed581289&curPage=0'
x = job(url)            # [title, detail_url] pairs from all listing pages
y = description(x)      # each record extended with the description text
# <--- write the collected rows to a CSV file --->
# BUG FIX: without an explicit encoding, open() uses the locale default
# (e.g. gbk/cp1252 on Windows) and writing the Chinese job text raises
# UnicodeEncodeError.  'utf-8-sig' also lets Excel open the file correctly.
with open('liepinjob_1.csv', 'w', newline='', encoding='utf-8-sig') as f:
    csv.writer(f).writerows(y)