"""__author__=晏"""

# 获取网页
import requests
from bs4 import BeautifulSoup


def get_page():
    """Fetch the Sina NBA news index page.

    Returns:
        str | None: The page HTML decoded as UTF-8, or ``None`` when the
        server does not answer with HTTP 200 or the request fails.
    """
    url = 'http://sports.sina.com.cn/nba/'
    # Send a desktop browser User-Agent so the site serves the normal page.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }
    try:
        # A timeout is essential: without one, requests can block forever
        # on a stalled connection.
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        # Network problems (DNS failure, refused connection, timeout, ...)
        # are reported the same way as a bad status: no page available.
        return None
    if response.status_code == 200:
        # response.content is the raw byte stream; decode it to str.
        return response.content.decode('utf-8')
    return None


def parse_page(html):
    """Parse the NBA index HTML and print the headline text of each link.

    Args:
        html (str | None): Page source produced by ``get_page``; ``None``
            (the fetch-failure sentinel) is tolerated and simply skipped.
    """
    # get_page() returns None on failure; BeautifulSoup(None, ...) would
    # raise a TypeError, so bail out early instead of crashing.
    if html is None:
        return

    soup = BeautifulSoup(html, 'lxml')

    # CSS-selector lookup: a space means "descendant" (child or deeper),
    # so this matches every <a> under .list under .news-list-b.
    a_list = soup.select('.news-list-b .list a')
    for item in a_list:
        print(item.string)

    # When an element carries several class names, join them with '.'
    # (no spaces) in the selector to require all of them.
    a_list = soup.select('div.-live-layout-container.row-')
    for item in a_list:
        print(item.string)



def main():
    """Entry point: download the NBA index page and print its headlines."""
    html = get_page()
    # get_page() returns None on failure; skip parsing rather than crash.
    if html is not None:
        parse_page(html)


if __name__ == '__main__':
    main()





















