# -*- coding:utf-8 -*-

import urllib.request as request
from bs4 import BeautifulSoup
import pandas as pd

# 1. Target URL -- hupu NBA page
headers = {'User-Agent': 'Mozilla/5.0 3578.98 Safari/537.36'}

url = 'https://nba.hupu.com/'

# Build the Request as a separate name instead of rebinding `url`
# (the original reassigned `url` from a str to a Request object).
req = request.Request(url, headers=headers)

# 2. Send the request. `with` guarantees the connection is closed
# even if reading fails -- the original never closed the response.
with request.urlopen(req) as response:
    # 3. Status code: 200 means the fetch succeeded
    print('状态码：', response.getcode())
    # 4. Read the raw response body (bytes)
    data = response.read()

# Parse the HTML with the lxml backend
soup = BeautifulSoup(data, "lxml")



# Select the leaderboard cells: tab headers, player names, teams, scores.
play_tab = soup.select("ul[class = 'player-tab'] li")
players = soup.select("li a[class='playerName']")
teams = soup.select("li a[class='playerTeam']")
scores = soup.select("li span[class='playerMax']")

# Extract each tag's text -- the original put the raw Tag objects into
# the DataFrame, yielding columns of HTML elements instead of values.
# Renamed `dict` -> `records`: `dict` shadowed the builtin type.
records = {
    "players": [tag.get_text(strip=True) for tag in players],
    "teams": [tag.get_text(strip=True) for tag in teams],
    "scores": [tag.get_text(strip=True) for tag in scores],
}

frame = pd.DataFrame(records)
# (dropped the bare `frame.aggregate` line -- it accessed the method
# without calling it, a no-op)
print(frame)

# i = 0
# for index,element in enumerate(players):
#     if((index+1)%5 == 1):
#         print(play_tab[i].text)
#         i += 1
#     #print(players[index].text, teams[index].text, scores[index].text)
#     obj = pd.Series([players[index].text,teams[index].text],scores[index].text)


# print(res1)
# 5、设置编码
# data = data.decode('utf-8')

# 获取所有图片的url
# listurl = re.findall(r'<a class="teamname ">', data)

# 东西部排名
# teamTeam = soup.select("ul li a[class = 'teamTeam']")
# teamRec = soup.select("ul li span[class = 'teamRec']")
#
# teamNow = soup.select("ul li span[class = 'teamNow']")
#
# teamCha = soup.select("ul li span[class = 'teamCha']")
#
# for index,element in enumerate(players):
#     if index == 0 :
#         print('西部')
#     if(index == 16):
#         print('东部')
#
#
#     print(teamTeam[index].text, teamRec[index].text, teamNow[index].text, teamCha[index].text)


