import requests
import pandas as pd
from time import sleep
from datetime import datetime

# ====================== Configuration (edit before running) ======================
# Browser-impersonating request headers. The Referer matches the ranking page
# so the API endpoint accepts the request as if it came from the site itself.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0',
    'Referer': 'https://www.hurun.net/zh-CN/Rank/HsRankDetails?pagetype=unicorn',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6'
}
# Session cookies copied from a live browser visit. NOTE(review): these are
# time-stamped analytics/session values and will expire — refresh them from
# the browser when requests start failing.
cookies={
    "__utmz": "245691549.1755567495.13.9.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/",
    "__utmc": "245691549",
    "Hm_lvt_2b09927a5895e3946dc6de8526befc81": "1755567495,1756085570",
    "HMACCOUNT": "1222F827FD22A500",
    "Hm_lpvt_2b09927a5895e3946dc6de8526befc81": "1756090145",
    "__utma": "245691549.1342174111.1744596528.1756085569.1756090145.18",
    "__utmb": "245691549.1.10.1756090145"
}





# ====================== Core code ======================
# Helper 1: request one page of the Hurun ranking API and return its JSON
def fetch_data(page):
    """Fetch one page of the Hurun unicorn ranking as parsed JSON.

    Args:
        page: 1-based page number; the API serves 20 records per page.

    Returns:
        The decoded JSON dict on success, or None on any request/decode
        failure (the error is printed so the caller can keep going).
    """
    url = 'https://www.hurun.net/zh-CN/Rank/HsRankDetailsList?'
    params = {
        'num': 'E9W16F3H',  # ranking-list code (global unicorn list)
        'search': '',
        'offset': (page - 1) * 20,  # pagination offset, 20 records per page
        'limit': 20
    }

    try:
        # BUG FIX: a missing timeout lets requests.get block forever on a
        # stalled connection; 15s is generous for this endpoint.
        response = requests.get(url, params=params, headers=headers,
                                cookies=cookies, timeout=15)
        response.raise_for_status()

        # Return the decoded JSON payload
        return response.json()
    except Exception as e:
        print(f"第{page}页请求失败：{str(e)}")
        return None


# Helper 2: parse the JSON payload and extract the fields we want
def parse_data(json_data):
    """Flatten one page of ranking JSON into a list of record dicts.

    Args:
        json_data: decoded API response; rows live under the 'rows' key.

    Returns:
        A list of flat dicts (one per company) with Chinese column names,
        ready to feed into a pandas DataFrame.
    """
    def _to_int(value):
        # BUG FIX: the original int(item.get(...)) raised TypeError when a
        # field was missing/None, killing the whole scrape on one bad row.
        try:
            return int(value)
        except (TypeError, ValueError):
            return None

    def _to_str(value):
        # Avoid turning a missing field into the literal string "None".
        return str(value) if value is not None else None

    records = []
    for item in json_data.get('rows', []):
        certificate = item.get('hs_Rank_Unicorn_Certificate')
        record = {
            '上榜年份': _to_int(item.get('hs_Rank_Unicorn_Year')),
            '全球排名': _to_int(item.get('hs_Rank_Unicorn_Ranking')),
            '全球排名变化': _to_str(item.get('hs_Rank_Unicorn_Ranking_Change')),
            '企业估值(亿元)': _to_int(item.get('hs_Rank_Unicorn_Wealth')),
            '企业估值(十亿美元)': _to_int(item.get('hs_Rank_Unicorn_Wealth_USD')),
            '企业估值变化(亿元？？)': _to_str(item.get('hs_Rank_Unicorn_Wealth_Change')),
            '企业名称_中文': item.get('hs_Rank_Unicorn_ComName_Cn'),
            '企业名称_英文': item.get('hs_Rank_Unicorn_ComName_En'),
            '企业总部地址_中文': item.get('hs_Rank_Unicorn_ComHeadquarters_Cn'),
            '企业总部地址_英文': item.get('hs_Rank_Unicorn_ComHeadquarters_En'),
            '行业_中文': item.get('hs_Rank_Unicorn_Industry_Cn'),
            '行业_英文': item.get('hs_Rank_Unicorn_Industry_En'),
            '企业创始人姓名_中文': item.get('hs_Rank_Unicorn_ChaName_Cn'),
            '企业创始人姓名_英文': item.get('hs_Rank_Unicorn_ChaName_En'),
            '与其他独角兽关系': item.get('hs_Rank_Unicorn_Relations'),
            '校核时间': item.get('hs_Rank_Unicorn_MTime'),
            '榜单ID': item.get('hs_Rank_Unicorn_ID'),
            '企业Logo_图片url': item.get('hs_Rank_Unicorn_CLogo'),
            # BUG FIX: without the guard, a missing certificate path produced
            # the bogus URL "https://res.hurun.netNone".
            '荣誉证书_图片url': f"https://res.hurun.net{certificate}" if certificate else None,
            '数据来源': '公众号：十点柠檬'
        }
        records.append(record)
    return records


# Main entry point: crawl every page, then dump everything to Excel
def main():
    """Scrape all pages of the ranking and save the records to an Excel file.

    Loops page by page until the API reports no more rows (or the running
    page count covers the reported total), then writes one timestamped
    .xlsx file.
    """
    all_data = []
    # BUG FIX: the start page was hard-coded to 73 (a debug/resume leftover),
    # which silently skipped pages 1-72 of the ranking.
    page = 1

    while True:
        print(f"正在爬取第{page}页...")
        json_data = fetch_data(page)

        # Stop on a failed request or an empty/missing 'rows' payload.
        if not json_data or not json_data.get('rows'):
            print("没有更多数据")
            break

        all_data.extend(parse_data(json_data))

        # Last page reached once the pages fetched cover the reported total.
        # .get with 0 stops gracefully if the API omits 'total'.
        if page * 20 >= json_data.get('total', 0):
            break

        page += 1
        sleep(1)  # throttle so we don't hammer the server

    # Persist everything to a timestamped Excel workbook.
    df = pd.DataFrame(all_data)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    df.to_excel(f"E:\\【1】_副业冲浪\\1_公众号文章\\7_2025日更系列\\15_2025胡润独角兽榜\\2025胡润全球独角兽榜_{timestamp}.xlsx", index=False)
    print(f"数据已保存，共{len(df)}条记录")


if __name__ == '__main__':
    main()