import requests
from bs4 import BeautifulSoup
import json
import time

def get_webpage(url):
    """Fetch the HTML content of *url*.

    Returns the response body as text, or None when the request fails
    (connection error, timeout, or non-2xx HTTP status).
    """
    # Browser-like headers: some sites reject requests that lack a
    # realistic User-Agent / Referer.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Referer': 'https://slamdunk.sports.sina.com.cn/'
    }

    try:
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()  # turn 4xx/5xx into an exception
        return response.text
    except requests.RequestException as e:
        # Catch only requests' own errors (the original bare `Exception`
        # would also hide programming bugs such as NameError).
        print(f"网页请求失败: {e}")
        return None

def extract_table_data(html):
    """Parse *html* and return the rows of the 'table-scores' table.

    Each row is a list of cell texts (both <th> and <td> cells).
    Returns None when the table is missing or yields no non-empty rows.
    """
    soup = BeautifulSoup(html, 'html.parser')
    table = soup.find('table', {'class': 'table-scores'})

    if table is None:
        print("未找到table-scores表格")
        return None

    # Flatten every <tr> inside every <tbody> into a list of cell texts.
    all_rows = [
        [cell.get_text(strip=True) for cell in tr.find_all(['th', 'td'])]
        for tbody in table.find_all('tbody')
        for tr in tbody.find_all('tr')
    ]
    # Drop rows that produced no cells at all.
    rows = [row for row in all_rows if row]

    return rows or None

def save_to_json(data, filename):
    """Serialize *data* to *filename* as pretty-printed UTF-8 JSON.

    Returns True on success, False when the file cannot be written or
    *data* is not JSON-serializable.
    """
    try:
        with open(filename, 'w', encoding='utf-8') as f:
            # ensure_ascii=False keeps Chinese text human-readable in the file.
            json.dump(data, f, ensure_ascii=False, indent=2)
        # Bug fix: the original printed the literal placeholder "(unknown)"
        # instead of interpolating the actual output path.
        print(f"数据已保存至 {filename}")
        return True
    except (OSError, TypeError, ValueError) as e:
        # OSError: file-system failures; TypeError/ValueError: data that
        # json cannot serialize. Narrower than the original bare Exception.
        print(f"保存文件失败: {e}")
        return False

def main():
    """Scrape the Sina NBA standings page and dump the table to D:\\sina.json."""
    url = "https://slamdunk.sports.sina.com.cn/rank#type=conference"

    # Step 1: fetch. Bail out early on any failure — helpers already
    # printed their own diagnostics.
    print("正在获取网页数据...")
    html = get_webpage(url)
    if html is None:
        return

    # Step 2: parse the standings table out of the page.
    print("正在解析表格数据...")
    rows = extract_table_data(html)
    if not rows:
        print("未提取到有效数据")
        return

    # Step 3: persist to disk.
    print("正在保存数据...")
    if save_to_json(rows, r"D:\sina.json"):
        print("操作成功完成！")

if __name__ == "__main__":
    # perf_counter() is the recommended clock for measuring elapsed time:
    # it is monotonic, unlike time.time(), which can jump backwards if the
    # system clock is adjusted mid-run.
    start_time = time.perf_counter()
    main()
    print(f"耗时: {time.perf_counter() - start_time:.2f}秒")