import requests
import math
import time
import redis
import requests
from lxml import etree


# def get_ip():
#     proxyAddr = "tun-oolotr.qg.net:18447"
#     authKey = "94AF792C"
#     password = "79DBC0D39912"
#     # 账密模式
#     proxyUrl = "http://%(user)s:%(password)s@%(server)s" % {
#         "user": authKey,
#         "password": password,
#         "server": proxyAddr,
#     }
#     proxies = {
#         "http": proxyUrl,
#         "https": proxyUrl,
#     }
#     return proxies
#
#
# proxies = get_ip()


def requests_get_with_retry(url):
    """GET *url* through the tunnel proxy, retrying up to 10 times.

    A retry is triggered by any request/decode exception and by two known
    anti-bot pages detected in the response body (captcha challenge and
    "system busy" interstitial).  Failures are appended to ``error.txt``.

    :param url: absolute URL to fetch.
    :return: the successful ``requests.Response``, or ``None`` once all
             10 attempts have failed — callers must check for ``None``.
    """
    # Headers and proxy settings are invariant across attempts, so build
    # them once instead of on every retry iteration.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
    }
    # SECURITY NOTE(review): proxy credentials are hard-coded in source;
    # move them to environment variables / config before sharing this file.
    proxyAddr = "tun-oolotr.qg.net:18447"
    authKey = "94AF792C"
    password = "79DBC0D39912"
    # Username/password proxy auth ("账密模式").
    proxyUrl = "http://%(user)s:%(password)s@%(server)s" % {
        "user": authKey,
        "password": password,
        "server": proxyAddr,
    }
    proxies = {
        "http": proxyUrl,
        "https": proxyUrl,
    }
    for _ in range(10):
        try:
            # Without a timeout a stalled connection hangs forever and the
            # retry loop never gets a chance to run; 15s is generous for a
            # proxied page fetch.
            response = requests.get(url, headers=headers, proxies=proxies, timeout=15)
            html_str = response.content.decode()
            if "本次访问已触发人机验证，请按指示操作" in html_str:
                raise Exception("触发人机验证")
            if "当前系统正忙，请登录后再试" in html_str:
                raise Exception("系统繁忙")
            return response
        except Exception as e:
            # Best-effort failure log; deliberately swallow and retry.
            with open("error.txt", "a", encoding="utf-8") as f:
                f.write(f"{url}请求失败，正在重试：{type(e)} {e}\n")
    return None


url = 'https://hz.lianjia.com/zufang/'
redis_conn = redis.Redis(host='localhost', port=6379, decode_responses=True)
response = requests_get_with_retry(url)
html_str = response.content.decode()
root = etree.HTML(html_str)
area_list = root.xpath("//div[@id='filter']/ul[2]/li[@class='filter__item--level2  ']/a")
for area in area_list:
    area_name = "".join(area.xpath("./text()"))
    area_url = "https://hz.lianjia.com" + "".join(area.xpath("./@href"))
    response = requests_get_with_retry(area_url)
    html_str = response.content.decode()
    root = etree.HTML(html_str)
    # 获取该区下的租房数量
    house_count = int("".join(root.xpath("//span[@class='content__title--hl']/text()")))
    print(f"杭州--{area_name}--{house_count}")
    if house_count <= 3000:
        # 存任务队列
        for page in range(1, math.ceil(house_count / 30) + 1):
            # 获取当前区域的链接，并存储到数据库中
            task_url = area_url + f"pg{page}/"
            redis_conn.rpush("lianjia_task_queue", task_url)
            print(area_name, task_url)
    else:
        # 在获取下一级的商圈获取租房数量
        county_list = root.xpath("//div[@id='filter']/ul[4]/li[@class='filter__item--level3  ']/a")
        for county in county_list:
            # 获取商圈名字
            county_name = "".join(county.xpath("./text()"))

            # 获取商圈的url地址
            county_url = "https://hz.lianjia.com" + "".join(county.xpath("./@href"))
            response = requests_get_with_retry(county_url)
            html_str = response.content.decode()
            root = etree.HTML(html_str)

            # 获取该商圈下的租房数量
            county_house_count = int("".join(root.xpath("//span[@class='content__title--hl']/text()")))
            print(f"杭州--{area_name}--{county_name}--{county_house_count}")
            if county_house_count <= 3000:
                # 存任务队列
                for page in range(1, math.ceil(county_house_count / 30) + 1):
                    task_url = county_url + f"pg{page}/"
                    redis_conn.rpush("lianjia_task_queue", task_url)
                    print(county_name, task_url)
            else:
                # 如果还有大于3000的再根据价格进行划分
                price_list = [(0, 1000), (1000, 1500), (1500, 2000), (2000, 3000), (3000, 5000), (5000, 10000),
                              (10000, 10000000)]
                for price in price_list:
                    price_url = county_url + f'brp{price[0]}erp{price[1]}/'
                    response = requests_get_with_retry(price_url)
                    html_str = response.content.decode()
                    root = etree.HTML(html_str)
                    house_count = int("".join(root.xpath("//span[@class='content__title--hl']/text()")))
                    print(f"杭州--{area_name}--{county_name}--{price}--{house_count}")
                    if house_count > 3000: house_count = 3000
                    if house_count <= 3000:
                        # 存任务队列
                        for page in range(1, math.ceil(house_count / 30) + 1):
                            # 获取价格下的租房数量
                            task_url = county_url + f"/pg{page}/" + f'brp{price[0]}erp{price[1]}/'
                            redis_conn.rpush("lianjia_task_queue", task_url)
                            print(county_name, price, task_url)
