import requests
from bs4 import BeautifulSoup
import json
def save_data(rankings):
    """Persist the rankings dict to 'nba_rankings.json' as pretty-printed JSON.

    Does nothing when *rankings* is empty or None.
    """
    if not rankings:
        return

    # Serialize first, then write in one call; keep non-ASCII (Chinese keys) readable.
    serialized = json.dumps(rankings, ensure_ascii=False, indent=2)
    with open('nba_rankings.json', 'w', encoding='utf-8') as f:
        f.write(serialized)

    print("数据保存成功！")

def get_nba_rankings():
    """Scrape NBA conference standings from ESPN and save them to disk.

    Returns:
        dict: {"Eastern Conference": [...], "Western Conference": [...]}
        where each entry is a per-team stat dict (team, wins, losses,
        win pct, games behind), or None when fetching/parsing fails.
    """
    # ESPN standings page
    url = "https://www.espn.com/nba/standings"

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }

    try:
        # timeout prevents the script from hanging forever on a stalled server
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        rankings = {
            "Eastern Conference": [],
            "Western Conference": []
        }

        # Assumes ESPN renders one ResponsiveTable per conference, East first.
        # NOTE(review): any table past the second is lumped into "Western
        # Conference" — confirm against the live page layout.
        for i, table in enumerate(soup.find_all('div', class_='ResponsiveTable')):
            conference = "Eastern Conference" if i == 0 else "Western Conference"

            for row in table.find_all('tr')[1:]:  # skip header row
                cols = row.find_all('td')
                # Need at least 4 cells (team, wins, losses, pct): the original
                # checked >= 3 but read cols[3] unconditionally, which raised
                # IndexError on a 3-cell row.
                if len(cols) >= 4:
                    link = cols[0].find('a')
                    team_name = link.get_text(strip=True) if link else cols[0].get_text(strip=True)

                    rankings[conference].append({
                        "球队": team_name,
                        "胜场": cols[1].get_text(strip=True),
                        "负场": cols[2].get_text(strip=True),
                        "胜率": cols[3].get_text(strip=True),
                        "胜差": cols[4].get_text(strip=True) if len(cols) > 4 else "N/A"
                    })

        save_data(rankings)
        return rankings

    except Exception as e:
        # Broad catch keeps the script best-effort: report and signal failure.
        print(f"获取数据失败: {e}")
        return None


# Script entry point: fetch, parse, and save the standings when run directly.
if __name__ == "__main__":
    get_nba_rankings()
