# +--------------------------
# | User: zq                -
# | Version: python3.7      -
# | Time: 2020-03-17 14:52                
# +--------------------------
# Bypass IP-based anti-scraping by routing every request through a rotating IP proxy.

import requests
from scrapy import Selector
from fake_useragent import UserAgent


def get_html(url, timeout=10):
    """Download *url* through the Abuyun dynamic HTTP tunnel proxy.

    A fresh random ``User-Agent`` header is attached to every request to
    reduce fingerprinting by the target site.

    Parameters
    ----------
    url : str
        The URL to fetch.
    timeout : float, optional
        Seconds to wait for the connection/response before giving up
        (default 10).  Without this, ``requests.get`` can hang forever on
        a stalled proxy, which also defeats the caller's retry loop.

    Returns
    -------
    requests.Response
        The raw response object; callers decode ``.content`` themselves.

    Raises
    ------
    requests.RequestException
        On connection errors, proxy failures, or timeout.
    """
    print("开始下载url: {}".format(url))
    ua = UserAgent()

    # Proxy server (Abuyun dynamic tunnel endpoint).
    proxy_host = "http-dyn.abuyun.com"
    proxy_port = "9020"

    # Tunnel authentication credentials.
    # SECURITY NOTE: credentials are hard-coded; move them to environment
    # variables or a config file before publishing this script.
    proxy_user = "HJE7D70248LR0LAD"
    proxy_pass = "8F03C65640B88879"

    proxy_meta = "http://{user}:{password}@{host}:{port}".format(
        user=proxy_user,
        password=proxy_pass,
        host=proxy_host,
        port=proxy_port,
    )

    # Route both plain and TLS traffic through the same tunnel.
    proxies = {
        "http": proxy_meta,
        "https": proxy_meta,
    }
    headers = {
        'User-Agent': ua.random,
    }

    resp = requests.get(url, proxies=proxies, headers=headers, timeout=timeout)

    return resp


if __name__ == "__main__":
    # Give up on a single job page after this many failed attempts.
    # The original `while success:` loop retried FOREVER on a permanently
    # broken URL, and an empty xpath result raised IndexError inside the
    # broad except, making that infinite loop the common case.
    MAX_RETRIES = 5

    # Pages 1..29 of the Python job listing; lagou repeats the page number
    # in both the path and the filterOption query parameter.
    for page in range(1, 30):
        job_list_url = "https://www.lagou.com/zhaopin/Python/{}/?filterOption={}".format(
            page,
            page)
        job_list_res = get_html(job_list_url)
        job_list_html = job_list_res.content.decode('utf8')

        sel = Selector(text=job_list_html)
        # Collect the detail-page links for every position on this listing page.
        all_lis = sel.xpath("//div[@id='s_position_list']//ul//li//div[@class='position']//a/@href").extract()
        for url in all_lis:
            for attempt in range(MAX_RETRIES):
                try:
                    job_res = get_html(url)
                    job_html = job_res.content.decode('utf8')
                    job_sel = Selector(text=job_html)
                    titles = job_sel.xpath("//div[@class='job-name']//h1/text()").extract()
                    # Guard the empty case: an anti-bot page or a layout
                    # change yields no match, which is not worth retrying.
                    if titles:
                        print(titles[0])
                    break  # success -- stop retrying this url
                except Exception:
                    print("下载失败")
            else:
                # Exhausted all retries without a successful download.
                print("giving up on url: {}".format(url))
