import csv
import json

import requests
from bs4 import BeautifulSoup

# Fetch the standings page.
# NOTE(review): the '#type=conference' fragment is never sent to the server —
# requests only transmits the path/query. If the conference tables are
# rendered client-side from that fragment, this static fetch may not contain
# them; verify against the actual response body.
url = "https://slamdunk.sports.sina.com.cn/rank#type=conference"
headers = {'User-Agent': 'Mozilla/5.0'}
# timeout prevents the script from hanging forever on a stalled connection;
# raise_for_status() stops us from silently parsing an HTTP error page.
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
response.encoding = 'utf-8'

# Parse the HTML document.
soup = BeautifulSoup(response.text, 'html.parser')

# Containers for the parsed standings, keyed by conference name.
data = {
    "东部排名": [],
    "西部排名": []
}

# Locate the standings tables — selector may need adjusting for the live page.
tables = soup.find_all('table', class_='data-table')[:2]  # first two tables only

# Column labels, in the order the cells appear in each row.
COLUMN_KEYS = ("排名", "球队", "胜", "负", "胜率", "胜差")

# First table is the Eastern conference, second the Western; zip stops
# gracefully if fewer than two tables were found.
for conference, table in zip(("东部排名", "西部排名"), tables):
    # Skip the header row, then walk every data row.
    for row in table.find_all('tr')[1:]:
        cells = [cell.text.strip() for cell in row.find_all('td')]
        if len(cells) >= 6:  # guard against malformed / short rows
            data[conference].append(dict(zip(COLUMN_KEYS, cells[:6])))

# Persist the full standings structure as pretty-printed, human-readable JSON
# (ensure_ascii=False keeps the Chinese keys/values unescaped).
with open('nba_rankings.json', 'w', encoding='utf-8') as out_file:
    out_file.write(json.dumps(data, ensure_ascii=False, indent=2))

# Save one CSV file per conference using the stdlib csv module, which quotes
# fields containing commas/quotes correctly — the previous hand-rolled
# f-string writer would have produced corrupt rows if any team name ever
# contained a comma.
CSV_COLUMNS = ["排名", "球队", "胜", "负", "胜率", "胜差"]
for conference, teams in data.items():
    filename = f'nba_{conference}.csv'
    # newline='' is required for csv in text mode (prevents blank lines on
    # Windows); lineterminator='\n' keeps the original Unix-style rows.
    with open(filename, 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(CSV_COLUMNS)  # header row
        for team in teams:
            writer.writerow([team[key] for key in CSV_COLUMNS])

print("数据已保存为 nba_rankings.json 和 nba_东部排名.csv/nba_西部排名.csv")