#!/usr/bin/python
# --coding:utf-8--


import requests
from lxml import etree

"""""""""""""""""""""""""""""""""
使用xpath爬取拉勾网中的招聘职位
"""""""""""""""""""""""""""""""""

"""
1、获取网站html代码
通过chrome的开发者工具，获取headers值，用于模拟浏览器登录，并设置编码格式。
2、获取职位数据：
使用etree.HTML(html.text)生成xpath的选择器，再使用xpath获取各职位数据,并
将获取到的每条职位的职位名称、公司、地点、薪水、发布时间保存于列表中。需要注
意的是，有些数据的值缺失或者不符合要求，需要使用
        try:
           # 这里写获取职位信息的代码
        except:  # 出错的数据不爬取
            continue
将出错的数据筛选掉。
3、爬取多页数据：
如果要爬取后面所有页的数据，可以使用循环实现，我们发现，后面每一页的url，只有
.html前的数据变化，并且依次递增，所以我们使用循环时，将url修改为当前页的url，
再重复上述功能。根据每一页URL规律，使用for循环动态设置URL，循环爬取。
"""


# Request headers and cookies are identical for every page, so build them once
# outside the loop. NOTE: the original defined `headers` twice per iteration;
# the second definition (this one) silently overwrote the first, so only this
# user-agent was ever sent.
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'}
cookies = {
    'Cookie': "user_trace_token=20220922104702-919cb60b-0f6b-42de-bb5f-9e60f01f1a9f; LGUID=20220922104702-e4478d0c-f054-42a2-a4f8-1681925f1db9; _ga=GA1.2.590540650.1663814823; index_location_city=%E5%85%A8%E5%9B%BD; __lg_stoken__=392a317e03da8f5e98250b24409236f3cd1315c5fc326a3c85d34a40984a6e441a72f164d4e82efbe178908561fae37044cd99faf0a1d7daa2910b239f4b9de351b2ef73a67d; JSESSIONID=ABAAABAABAGABFA1879AEBF3350572229BC55B07E1BD11D; WEBTJ-ID=20221007235321-183b326b4d918a-0538061bde9c6f-26021c51-921600-183b326b4da987; LGSID=20221007235322-11356c71-aa29-468a-8ea8-865c6db3090a; _gid=GA1.2.1389088088.1665158002; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1663814823,1665158003; sm_auth_id=jfj2p8qfcwjc49io; gate_login_token=64b952f94f37b1e14a8a40e02c72d721944c1c5e0f414b0b0b32276413d4df9a; _putrc=C20398DD0A638FED123F89F2B170EADC; login=true; unick=%E9%83%91%E7%94%B3%E6%96%87; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; hasDeliver=0; privacyPolicyPopup=false; X_HTTP_TOKEN=c559b38e36325b0d337951566147327a8288155bc1; _gat=1; __SAFETY_CLOSE_TIME__18418267=1; sensorsdata2015session=%7B%7D; TG-TRACK-CODE=index_search; SEARCH_ID=9ee0baae8520498e8685e65dbeb3ea74; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2218418267%22%2C%22first_id%22%3A%2218363176d6b252-0e3a8567975ecd-26021c51-921600-18363176d6c947%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24os%22%3A%22Windows%22%2C%22%24browser%22%3A%22Chrome%22%2C%22%24browser_version%22%3A%22105.0.0.0%22%2C%22%24latest_referrer_host%22%3A%22%22%7D%2C%22%24device_id%22%3A%2218363176d6b252-0e3a8567975ecd-26021c51-921600-18363176d6c947%22%7D; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1665159779; LGRID=20221008002258-47fa39b4-4e18-4be0-85dc-0d220fff83af"
}

# Crawl the first 3 listing pages; pages in the URL path are 1-indexed.
for page in range(3):
    # Build the URL for the current page.
    url = 'https://www.lagou.com/zhaopin/Python/' + str(page + 1) + '/'
    print(url)
    # Send the request and get the response object.
    response = requests.get(url, headers=headers, cookies=cookies)
    # Force UTF-8 so Chinese text decodes correctly.
    response.encoding = 'utf-8'
    # Parse the HTML into an element tree for XPath queries.
    html_tree = etree.HTML(response.text)
    # One <li> per job posting.
    all_jobs = html_tree.xpath('//ul[@class="item_con_list"]/li')
    for job in all_jobs:
        # Some listings are missing fields; indexing [0] on an empty XPath
        # result raises IndexError. Per the module docstring, such entries
        # are skipped rather than crashing the whole crawl.
        try:
            # Job title:
            job_name = job.xpath('.//div[@class="p_top"]/a/h3/text()')[0]
            # Company:
            job_company = job.xpath('.//div[@class="company_name"]/a/text()')[0]
            # Location:
            job_address = job.xpath('.//div[@class="p_top"]/a/span[@class="add"]/em/text()')[0]
            # Salary:
            job_salary = job.xpath('.//div[@class="li_b_l"]/span[@class ="money"]/text()')[0]
            # Posting time:
            job_time = job.xpath('.//div[@class="p_top"]/span/text()')[0]
        except IndexError:
            # Malformed / incomplete listing — skip it.
            continue
        job_info = [job_name, job_company, job_address, job_salary, job_time]
        print(job_info)



