import requests
import pandas as pd
from time import sleep
from datetime import datetime

# ====================== Configuration ======================
# Request headers impersonating a desktop Edge browser. The site gates the
# ranking API behind a session cookie, so the Cookie value below must be
# refreshed from a real browser session when it expires.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0',
    'Referer': 'https://www.hurun.net/zh-CN/Rank/HsRankDetails?pagetype=global',
    'Cookie': '__utmc=245691549; __utmz=245691549.1746502104.7.5.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; Hm_lvt_2b09927a5895e3946dc6de8526befc81=1744596528,1745229564,1746502105; HMACCOUNT=1222F827FD22A500; __utma=245691549.1342174111.1744596528.1746506674.1746508624.8; Hm_lpvt_2b09927a5895e3946dc6de8526befc81=1746508624',  # must be replaced with a valid Cookie
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6'
}

PROXY = None  # proxy example: {'http': 'http://user:pass@ip:port'}


# ====================== Core code ======================
def fetch_data(page):
    """Fetch one 20-row page of the Hurun global rich-list ranking API.

    Args:
        page: 1-based page number; translated into the API's offset/limit
            pagination parameters.

    Returns:
        The decoded JSON payload as a dict on success, or None when the
        request, HTTP status check, or JSON decoding fails (the error is
        printed, not raised, so the caller can simply stop paging).
    """
    endpoint = "https://www.hurun.net/zh-CN/Rank/HsRankDetailsList"
    page_size = 20
    query = {
        'num': 'ND77BFWM',  # identifies which ranking list to fetch
        'search': '',
        'offset': (page - 1) * page_size,
        'limit': page_size,
    }

    try:
        resp = requests.get(
            endpoint,
            params=query,
            headers=HEADERS,
            proxies=PROXY,
            timeout=10,
        )
        resp.raise_for_status()
        return resp.json()
    except Exception as err:
        # Best-effort crawling: report the failure and let main() decide.
        print(f"第{page}页请求失败：{err}")
        return None


def parse_data(json_data):
    """Flatten one page of the Hurun rich-list JSON into plain dict records.

    Args:
        json_data: decoded response dict; rows live under the 'rows' key and
            each row nests the person's details in an 'hs_Character' list.

    Returns:
        A list of dicts with Chinese column names, one per row, suitable for
        building a pandas DataFrame.
    """

    def _to_int(value):
        # Wealth fields are numeric or numeric strings in the payload; return
        # None instead of crashing when a field is absent or malformed.
        try:
            return int(value)
        except (TypeError, ValueError):
            return None

    records = []
    for item in json_data.get('rows', []):
        # Guard against a missing or empty character list so one bad row
        # cannot abort parsing of the whole page.
        characters = item.get('hs_Character') or [{}]
        character = characters[0]

        certificate = item.get('hs_Rank_Global_Certificate')
        record = {
            '全球排名': item.get('hs_Rank_Global_Ranking'),
            '全球排名变化': str(item.get('hs_Rank_Global_Ranking_Change')),
            '姓名': character.get('hs_Character_Fullname_Cn'),
            '财富值(亿元)': _to_int(item.get('hs_Rank_Global_Wealth')),
            '财富值(十亿美元)': _to_int(item.get('hs_Rank_Global_Wealth_USD')),
            '财富同比增速(%)': str(item.get('hs_Rank_Global_Wealth_Change')).strip('%'),
            '企业名称': item.get('hs_Rank_Global_ComName_Cn'),
            '行业': item.get('hs_Rank_Global_Industry_Cn'),
            '国籍代码': character.get('hs_Character_Nationality'),
            '性别': character.get('hs_Character_Gender'),
            '年龄': str(character.get('hs_Character_Age')),
            '出生日期': character.get('hs_Character_Birthday'),
            '出生地': character.get('hs_Character_BirthPlace_Cn'),
            '常居地': character.get('hs_Character_Permanent_Cn'),
            '教育水平': character.get('hs_Character_Education_Cn'),
            '毕业院校': character.get('hs_Character_School_Cn'),
            '所学专业': character.get('hs_Character_Major_En'),
            '与其他富豪关系': item.get('hs_Rank_Global_Relations'),
            '照片链接': character.get('hs_Character_Photo'),
            # Outer quotes must differ from the inner ones: nesting the same
            # quote inside an f-string is a SyntaxError before Python 3.12.
            # Also avoid emitting the literal "None" when the field is absent.
            '全球排名证书链接': f"https://res.hurun.net{certificate or ''}",
            '数据来源': '公众号：十点柠檬',
        }
        records.append(record)
    return records


def main():
    """Crawl every page of the Hurun global rich list and export to Excel.

    Pages are fetched sequentially with a 1-second pause between requests;
    crawling stops on a failed request, an empty page, or once the server's
    reported `total` row count has been paged through. The collected rows
    are written to a timestamped .xlsx file.
    """
    all_data = []
    page = 1
    page_size = 20  # must match the 'limit' used by fetch_data

    while True:
        print(f"正在爬取第{page}页...")
        json_data = fetch_data(page)

        # Stop on request failure or when the server returns no rows.
        if not json_data or not json_data.get('rows'):
            print("没有更多数据")
            break

        all_data.extend(parse_data(json_data))

        # Stop once we have paged past the reported total row count;
        # .get() avoids a KeyError on a malformed response.
        if page * page_size >= json_data.get('total', 0):
            break

        page += 1
        sleep(1)  # throttle to avoid hammering the server

    # Save to Excel. Raw strings keep the Windows backslashes literal: the
    # original path mixed "\\" with the invalid escape "\8", which only
    # works by accident and is slated to become a SyntaxError.
    df = pd.DataFrame(all_data)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    out_path = (
        r"E:\【1】_副业冲浪\1_公众号文章\7_2025日更系列\8_2025胡润富豪榜"
        rf"\2025胡润全球富豪榜_{timestamp}.xlsx"
    )
    df.to_excel(out_path, index=False)
    print(f"数据已保存，共{len(df)}条记录")


# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()