import re
from urllib.parse import urljoin

import requests
# Scrape the CAS academician listing page and print the URL of each
# member's detail sub-page.
url = "https://casad.cas.cn/ysxx2022/ysmd/qtys/"  # listing page to crawl

# Spoof a browser User-Agent so the site does not reject us as a bot.
headers = {
    "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Mobile Safari/537.36 Edg/126.0.0.0"
}
resp = requests.get(url, headers=headers, timeout=10)  # timeout so we never hang forever
resp.encoding = "utf-8"  # force UTF-8 to avoid mojibake in the Chinese text

# Pattern 1: isolate the <div class="rmbs_a"> container holding the link list.
obj1 = re.compile(r'<div class="rmbs_a">(?P<ul>.*?)</div>', re.S)
# Pattern 2: pull each href out of the anchors inside that container.
obj2 = re.compile(r'<a href="(?P<href>.*?)"')

# Collect the detail-page URLs. Hrefs in the page may be relative, so resolve
# each one against the listing URL (urljoin is a no-op for absolute URLs).
child_href_list = []
for it in obj1.finditer(resp.text):
    for itt in obj2.finditer(it.group("ul")):
        child_href_list.append(urljoin(url, itt.group("href")))

# Visit each detail sub-page. NOTE(fix): the original re-bound the headers
# dict (`dic`) inside the first loop and sent no headers here at all; the
# same UA header is now sent on every request.
for flag, href in enumerate(child_href_list, start=1):
    child_resp = requests.get(href, headers=headers, timeout=10)
    child_resp.encoding = "utf-8"
    print("NO.", flag)
    print(href)