import requests
from lxml import etree
import time


# Known bugs:
# Monthly-best category: only 2 pages of data, loop did not exit in time and
#   should have moved straight to the next category — fixed
# Kitchen 101 category: only 5 pages of data — fixed
# Popular-menus category: data layout differs from the original tags — unresolved
# Proxy IP requests have no exception handling — unresolved

# Fetch a proxy from the KDL paid-proxy service.
def get_ip():
    """Request one proxy from the KDL API and return a requests-style
    ``proxies`` dict covering both http and https traffic.

    Returns:
        dict: ``{'http': 'http://user:pass@ip:port', 'https': ...}``

    Raises:
        requests.RequestException: on network failure or non-2xx response.
        KeyError / IndexError: if the API payload contains no proxy entries.
    """
    # NOTE(review): secret_id/signature are hard-coded credentials — they
    # should be moved to environment variables / config before sharing.
    kdl_url = "https://dps.kdlapi.com/api/getdps/?secret_id=o6ykpp1tqja9zj9g4947&signature=ne52s9ngdvy61nrghkzf7kytjk7c3qgk&num=1&format=json&sep=1"
    response = requests.get(kdl_url, timeout=10)  # don't hang forever on the API
    response.raise_for_status()  # fail loudly instead of crashing on a bad payload
    ip = response.json()["data"]["proxy_list"][0]
    proxies = {
        # format: "http://account:password@IP:port"
        'http': f'http://d2769292027:8bnrqo2g@{ip}',
        'https': f'http://d2769292027:8bnrqo2g@{ip}',
    }
    return proxies


# Seed one proxy for the whole session; rotated below on network failure.
proxies = get_ip()

# First fetch the explore landing page to discover the category navigation.
url = 'https://www.xiachufang.com/explore/'
response = requests.get(url, proxies=proxies, timeout=10)
html_str = response.content.decode()
root = etree.HTML(html_str)

# Each <ul> in the left nav holds category names plus their relative URLs.
type_list = root.xpath('//div[@class="pure-u-1-4 search-left-nav"]/ul')
# True while scraping the "most popular this week" explore listing (first
# pass, pages 1-20); flipped to False so every later iteration uses the
# per-category URL instead.
status = True
for data in type_list:
    # Category names and their relative URLs, paired up into a dict.
    type_url_list = data.xpath('.//li/a/@href')
    type_name_list = data.xpath('.//li/a/text()')
    type_dict = dict(zip(type_name_list, type_url_list))
    for index, (key, value) in enumerate(type_dict.items(), start=1):
        # Start page for this category.
        page = 1
        # Consecutive network failures for the current page (proxy rotation).
        retries = 0

        # Walk every page of the current category until a non-200 response
        # signals we've run past the last page.
        while True:
            if status:
                url = f'https://www.xiachufang.com/explore/?page={page}'
                if page > 20:
                    status = False
                    break
                print("本周最受欢迎", url)
            else:
                new_url = "https://www.xiachufang.com" + value + "?page=" + str(page)
                url = new_url
                print(key, new_url)
            # Fetch the listing page. On a network/proxy failure, grab a
            # fresh proxy and retry the same page a few times instead of
            # crashing the whole run (fixes the "no IP exception handling"
            # TODO noted at the top of the file).
            try:
                response = requests.get(url, proxies=proxies, timeout=10)
            except requests.RequestException:
                retries += 1
                if retries > 3:
                    print(f"共{page - 1}页、已全部取完")
                    break
                proxies = get_ip()  # proxy likely expired — rotate it
                continue
            retries = 0
            print(f"************************第{page}页************************")

            # A non-200 status means the category has no more pages.
            if response.status_code != 200:
                print(f"共{page - 1}页、已全部取完")
                break

            html_str = response.content.decode()
            root = etree.HTML(html_str)
            food_list = root.xpath('//div[@class="info pure-u"]')
            for food in food_list:
                # Data cleanup: join text fragments, strip stray whitespace.
                title = "".join(food.xpath('./p[@class="name"]/a/text()')).strip()
                materials = "、".join(food.xpath('./p[@class="ing ellipsis"]/a/text()'))
                quantity = "七天内 " + "".join(food.xpath('./p[@class="stats green-font"]/span/text()')) + " 人做过"
                author = "".join(food.xpath('./p[@class="author"]/a/text()'))
                print(title, materials, quantity, author)
            print(f"____________________第{page}页结束____________________")
            page += 1
            time.sleep(1)  # be polite to the server between pages
