from bs4 import BeautifulSoup
from urllib import request
# from bs4 import UnicodeDammit
# Fetch and print the headline news links from the school's official website.
url = "https://www.whit.edu.cn/"

try:
    # A browser-like User-Agent avoids the trivial bot-blocking some sites do.
    headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre" }
    req = request.Request(url=url, headers=headers)

    # Context manager guarantees the HTTP connection is closed, even on error.
    with request.urlopen(req) as response:
        raw = response.read()

    # Decode leniently: a stray non-UTF-8 byte should not abort the whole
    # scrape (the page is nominally UTF-8; 'replace' substitutes bad bytes).
    data = raw.decode("utf-8", errors="replace")

    soup = BeautifulSoup(data, "lxml")

    # The headline links live in the first <ul class="news-list"> on the page.
    # find_all always returns a list (possibly empty), so truthiness suffices.
    news_lists = soup.find_all("ul", attrs={"class": "news-list"})
    if news_lists:
        first_list = news_lists[0]
        for a in first_list.find_all("a"):
            # .get avoids a KeyError for anchors that carry no href attribute.
            href = a.get("href", "")
            print(a.text + ":" + href)

except Exception as err:
    # Top-level boundary of a best-effort script: report the failure
    # instead of dumping a traceback.
    print(err)