# Searching the raw page source for "python developer" finds nothing,
# which suggests the job listings are loaded via AJAX — so we hit the
# AJAX JSON endpoint directly instead of scraping the listing page.
import requests
from lxml import etree
import time
import re

# Browser-like request headers. Lagou rejects requests that lack a matching
# Referer/Origin or the X-Requested-With AJAX marker, so the full set is sent.
headers = {

    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
                  ' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 '
                  'Safari/537.36',
    # The search-results page the AJAX call originates from.
    'Referer': 'https://www.lagou.com/jobs/list_python?' \
               'city=%E5%8C%97%E4%BA%AC&cl=false&fromSearch=true&labelWords=&suginput=',
    'Origin': 'https://www.lagou.com',
    # NOTE(review): "Anit" (sic) appears to be the header name the site
    # actually uses — do not "fix" the spelling.
    'X-Anit-Forge-Code': '0',
    'X-Anit-Forge-Token': 'None',
    'X-Requested-With': 'XMLHttpRequest'
}

def request_list_page(keyword='python', max_page=13):
    """Fetch listing pages from Lagou's AJAX endpoint and parse each position.

    The listings are not in the page source; they are loaded via an AJAX
    POST, so we call the JSON endpoint directly.

    :param keyword: search keyword sent as the ``kd`` form field
                    (default ``'python'``, matching the original behavior).
    :param max_page: highest page number to request; pages 1..max_page
                     (default 13, matching the original ``range(1, 14)``).
    """
    url = ('https://www.lagou.com/jobs/positionAjax.json'
           '?city=%E5%8C%97%E4%BA%AC&needAddtionalResult=false')
    data = {
        # Lagou sends first='false' even when navigating back to page 1.
        'first': 'false',
        'pn': 1,  # page number; overwritten on every loop iteration
        'kd': keyword,
    }
    for page in range(1, max_page + 1):
        data['pn'] = page
        # The site blocks requests without the browser-like headers above
        # (Referer, Origin, X-Requested-With), so they must all be set.
        response = requests.post(url, headers=headers, data=data)
        time.sleep(3)  # throttle to avoid triggering anti-scraping checks
        # .json() parses the body as a dict; it raises if the response is
        # not JSON (e.g. when an anti-scraping page is returned instead).
        result = response.json()
        print(result)
        positions = result['content']['positionResult']['result']
        for position in positions:
            position_id = position['positionId']
            position_url = 'https://www.lagou.com/jobs/%s.html' % position_id
            parse_position_detail(position_url)
            # NOTE(review): these two breaks limit the crawl to the first
            # position of the first page — presumably debug leftovers; kept
            # to preserve existing behavior. Remove them to crawl everything.
            break
        break

def parse_position_detail(url):
    """Fetch a Lagou job-detail page and extract its key fields.

    :param url: full URL of a job-detail page.
    :return: dict with keys ``position_name``, ``salary``, ``city``,
             ``work_years``, ``education`` and ``desc``.
             (The original returned nothing, so returning the parsed data
             is backward-compatible for existing callers.)
    """
    response = requests.get(url, headers=headers)
    return _parse_position_html(response.text)


def _parse_position_html(text):
    """Parse the HTML of a job-detail page into a dict of fields."""
    html = etree.HTML(text)
    # BUG FIX: the original xpath returned element objects, not the text;
    # select text() and guard against a missing node.
    name_texts = html.xpath("//div[@class='job-name']/span/text()")
    position_name = name_texts[0].strip() if name_texts else ''
    # The "job_request" line holds salary / city / experience / education.
    job_request_spans = html.xpath("//dd[@class='job_request']/p/span")
    salary = job_request_spans[0].xpath('.//text()')[0].strip()
    # BUG FIX: the cleaned work_years and education values were previously
    # assigned back into `city`, clobbering it and leaving them raw.
    city = _clean_field(job_request_spans[1].xpath('.//text()')[0])
    work_years = _clean_field(job_request_spans[2].xpath('.//text()')[0])
    education = _clean_field(job_request_spans[3].xpath('.//text()')[0])
    # text() yields a list of fragments; join them into a single string.
    desc = ''.join(html.xpath("//dd[@class='job_bt']//text()")).strip()
    return {
        'position_name': position_name,
        'salary': salary,
        'city': city,
        'work_years': work_years,
        'education': education,
        'desc': desc,
    }


def _clean_field(value):
    """Strip whitespace and '/' separators from a scraped field.

    BUG FIX: the original pattern ``r"[\\s/]]"`` only matched a whitespace
    or slash *followed by* a literal ']', so nothing was actually removed;
    ``r"[\\s/]"`` strips the separator characters as intended.
    """
    return re.sub(r"[\s/]", "", value.strip())








def main():
    """Script entry point: kick off the listing-page crawl."""
    request_list_page()

# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()

