import requests
from bs4 import BeautifulSoup
import json

# Accumulator for scraped rows: one list of table-cell strings per player,
# filled in place by fillUnivList() and written out by the __main__ section.
allUniv = []


# Read the player <tr>/<td> tags from the parsed page.
def fillUnivList(soup):
    """Extract every data row from *soup* and append it to the global allUniv.

    Each <tr> that contains at least one <td> becomes one list holding
    td.string for every cell (note: td.string is None when a cell contains
    nested markup — kept as-is, matching the original behavior).

    Args:
        soup: a parsed BeautifulSoup document (anything exposing find_all).
    """
    for tr in soup.find_all('tr'):
        cells = tr.find_all('td')
        if not cells:  # header/separator rows carry no <td> — skip them
            continue
        allUniv.append([td.string for td in cells])


if __name__ == '__main__':
    # Fetch the NBA player scoring-rank page from Sohu Sports.
    url = 'http://data.sports.sohu.com/nba/nba_players_rank.php?order_by=points&spm=smpc.fb-nba-home.top-dc.2.1620824904040nf6byr7'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'}
    resp = requests.get(url=url, headers=headers)
    # The page is GBK-encoded; without this, Chinese text would be garbled.
    resp.encoding = 'gbk'
    html = resp.text
    # Parse the page. Name the parser explicitly: bare BeautifulSoup(html)
    # emits a "no parser specified" warning and silently picks whichever
    # parser happens to be installed.
    soup = BeautifulSoup(html, 'html.parser')
    fillUnivList(soup)
    # Write one player row per line. (The original code called
    # json.dumps(allUniv) and discarded the result — dead code removed;
    # the actual file format is str(row) per line, unchanged here.)
    with open('球员排名.txt', 'w', encoding='utf-8') as fo:
        for row in allUniv:
            fo.write(str(row) + '\n')
