
from pprint import pprint

import requests
from bs4 import BeautifulSoup


# Fetch the web page
def get_page():
    """Download the Sina NBA index page.

    Returns:
        str | None: the page HTML decoded as UTF-8 on HTTP 200,
        otherwise None.
    """
    url = 'http://sports.sina.com.cn/nba/'
    # Spoof a desktop-browser User-Agent so the server returns the normal page.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }
    # timeout prevents the request from blocking forever on a stalled server.
    response = requests.get(url, headers=headers, timeout=10)
    if response.status_code == 200:
        # response.content is a byte string; decode it explicitly as UTF-8.
        return response.content.decode('utf-8')
    return None


# Parse the fetched page
def parse_page(html):
    """Extract and print the headline link texts from the NBA index page.

    Args:
        html: the page HTML as a string, or None/empty when the
            download failed (get_page returns None on non-200).
    """
    # Guard: BeautifulSoup(None, ...) raises TypeError, so bail out early
    # when the fetch failed or returned an empty document.
    if not html:
        return
    soup = BeautifulSoup(html, 'lxml')
    # CSS selector note: when one class attribute contains several names,
    # chain them with dots and no spaces ('div.a.b'); a space in the
    # selector descends to any nesting depth (child or grandchild).
    a_list = soup.select('div.-live-layout-container.row-fuild .news-list-b .list a')
    for item in a_list:
        print(item.string)


def main():
    """Entry point: download the NBA index page and print its headlines."""
    html = get_page()
    # get_page returns None on a non-200 response; don't parse in that case.
    if html is None:
        print('Failed to fetch page')
        return
    parse_page(html)


# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()