"""Fetch the Baidu Tieba college-category page and print its raw HTML."""
import requests

# Browser-like User-Agent: Baidu serves a blocked/degraded page to the
# default python-requests UA (same header the commented-out version below uses).
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}

res = requests.get(
    "https://tieba.baidu.com/t/f/?class=college",
    headers=HEADERS,
    timeout=10,  # requests has NO default timeout; without this the script can hang forever
)
res.raise_for_status()  # fail loudly on 4xx/5xx instead of silently printing an error page
print(res.text)




# import json
# # import time
# import requests
# from bs4 import BeautifulSoup
#
# headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
#
# def get_school_list():
#
#     url = 'https://tieba.baidu.com/t/f/?class=college'
#     resp = requests.get(url, headers=headers)
#     resp.raise_for_status()
#     soup = BeautifulSoup(resp.text, 'html.parser')
#
#     schools = []
#     for a in soup.select('a.each_topic_entrance_item'):
#         name = a.get_text(strip=True)
#         href = a['href']
#         tieba_url = 'https:' + href if href.startswith('//') else 'https://tieba.baidu.com' + href
#         schools.append({'school': name, 'tieba_url': tieba_url})
#     return schools
#
#
#
# def main():
#     schools = get_school_list()
#
#     with open('university_tieba.json', 'w', encoding='utf-8') as f:
#         json.dump(schools, f, ensure_ascii=False, indent=2)
#     print('全部完成！已保存 university_tieba.json')
#
# if __name__ == '__main__':
#     main()