import requests
from bs4 import BeautifulSoup
import csv

# Browser-like User-Agent so the site does not reject the request as a bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
}

# Send a GET request to the Sina sports rankings page.
# FIX: the original built `headers` but never passed it; also add a timeout
# so the script cannot hang forever on a stalled connection.
# NOTE(review): the '#type=conference' fragment is never transmitted to the
# server — if the rankings are rendered client-side from that fragment, the
# table may be absent from the raw HTML; verify against the live page.
response = requests.get(
    'https://slamdunk.sports.sina.com.cn/rank#type=conference',
    headers=headers,
    timeout=30,
)

# Proceed only if the request succeeded
if response.status_code == 200:
    # Parse the HTML content with BeautifulSoup
    soup = BeautifulSoup(response.content, 'html.parser')

    # Locate the table containing the rankings
    table = soup.find(class_='rank-data')

    if table is None:
        # FIX: the original would crash with AttributeError on
        # `table.find_all` if the table is missing (layout change or
        # JS-rendered content); report it instead.
        print('未找到排名表格。页面结构可能已更改。')
    else:
        # Open a CSV file to write the data
        with open('2023-2024_regular_season_rankings.csv', 'w', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)

            # Write the CSV header row
            writer.writerow(['Team Name', 'Conference', 'Ranking'])

            # Iterate over every data row of the table (skip the header row)
            for row in table.find_all('tr')[1:]:
                # Extract the team name, conference, and ranking cells
                team_cell = row.find('td', class_='team-pic')
                conference_cell = row.find('td', class_='team-conference')
                ranking_cell = row.find('td', class_='team-ranking')

                # FIX: skip malformed rows instead of crashing on
                # `.text` of None when an expected cell is absent.
                if None in (team_cell, conference_cell, ranking_cell):
                    continue

                # Write the extracted data to the CSV file
                writer.writerow([
                    team_cell.text.strip(),
                    conference_cell.text.strip(),
                    ranking_cell.text.strip(),
                ])

        print('数据已成功写入 2023-2024_regular_season_rankings.csv')
else:
    print(f'无法获取网页。状态码：{response.status_code}')