# -*- coding: utf-8 -*-


from lxml import etree
from fake_useragent import UserAgent
import time
import random
import os
import requests
import shutil
import base64
from requests.auth import HTTPProxyAuth

# Proxy server (Abuyun HTTP proxy endpoint)
proxyHost = "http-pro.abuyun.com"
proxyPort = "9010"
# Proxy tunnel credentials
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file before committing/sharing.
proxyUser = "H01I644F9J45ODID"
proxyPass = "C61D2EF7EE18A03A"



# 获取页面url
# Fetch listing pages and their detail pages
def get_url():
    """Crawl Ganji Shanghai rental-listing pages 1-9 through the Abuyun proxy.

    For every listing on each page, extracts the title, detail-page URL,
    size description and address, then fetches the detail page itself.
    Results are only parsed/printed; nothing is returned (network I/O only).
    """
    # Proxy URL and auth are loop-invariant — build them once, outside the
    # loop and outside the try block (string formatting cannot fail here).
    proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
        "host": proxyHost,
        "port": proxyPort,
        "user": proxyUser,
        "pass": proxyPass,
    }
    proxies = {
        "http": proxyMeta,
        "https": proxyMeta,
    }
    auth = HTTPProxyAuth(proxyUser, proxyPass)
    # BUG FIX: UserAgent was re-created on every page iteration; each
    # construction can itself hit the network. Create it once.
    ua = UserAgent(use_cache_server=False)  # disable the UA cache server
    for page in range(1, 10):
        print("======当前页:" + str(page) + "==========")
        url = f"http://sh.ganji.com/zhaozu/1/pn{page}/"
        headers = {
            'User-Agent': ua.random,  # rotate the User-Agent per page
        }
        try:
            # BUG FIX: `proxies` and `auth` were built but never passed to
            # requests.get, so every request bypassed the paid proxy.
            # Also add a timeout so a dead proxy/server cannot hang forever.
            html = requests.get(url, headers=headers, proxies=proxies,
                                auth=auth, timeout=15)
            selector = etree.HTML(html.text)
            contents = selector.xpath('//div[@class="f-list-item ershoufang-list"]')
            print("=======获取首页========")
            for c in contents:
                title = "".join(c.xpath('.//dd[@class="dd-item title"]/a/text()'))
                # [0] raises IndexError on a malformed item; caught by the
                # broad handler below so one bad item skips the whole page.
                childUrl = c.xpath('.//dd[@class="dd-item title"]/a/@href')[0]
                desc = "".join(c.xpath('.//dd[@class="dd-item size"]//text()')) \
                    .replace("\n", "").replace(" ", "")
                addr = "".join(c.xpath('.//dd[@class="dd-item address"]//text()')) \
                    .replace("\n", "").replace(" ", "")
                time.sleep(random.randint(2, 4))  # polite delay before detail fetch
                html1 = requests.get(url=childUrl, headers=headers,
                                     proxies=proxies, auth=auth, timeout=15)
                selector1 = etree.HTML(html1.text)
                print("=======获取【分页】========")
        except Exception as ex:
            # Best-effort crawl: log the error and continue with the next page.
            print(ex)
        time.sleep(random.randint(1, 4))  # polite delay between listing pages

# Script entry point: start the crawler only when run directly,
# not when this module is imported.
if __name__ == '__main__':
    get_url()
