import re
import requests
import  csv

# Index page listing the academicians; detail-page links are scraped from it.
url = "https://casad.cas.cn/ysxx2022/ysmd/qtys/"
# Spoof a browser User-Agent so the site does not reject the scraper.
# (renamed from `dict`, which shadowed the builtin)
headers = {
    "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Mobile Safari/537.36 Edg/126.0.0.0"
}
# timeout prevents the script from hanging forever on a stalled connection
resp = requests.get(url, headers=headers, timeout=30)
resp.encoding = "utf-8"  # force UTF-8 so Chinese text is not garbled


# Captures the <div class="rmbs_a"> container that holds the list of
# detail-page hyperlinks on the index page.
obj1 = re.compile(r'<div class="rmbs_a">(?P<ul>.*?)</div>', re.S)

# Extracts each individual href from the container matched by obj1.
obj2= re.compile(r'<a href="(?P<href>.*?)"')

# Generic detail-page pattern: department (bumen), name, portrait image URL
# (imgul), plus the raw biography text (String) up to the Previous/Next
# footer. The biography is parsed further by the obj4_* family below.
obj3= re.compile(r'<script>.*?var currentChannel1 = "(?P<bumen>.*?)"'
                 r'.*?<p class="wztitle">(?P<name>.*?)</p>'
                 r'.*?<div class="acadImg"><img src="(?P<imgul>.*?)"'
                 r'.*?<div class="acadTxt">.*>(?P<String>.*)。.*?<div class="Previous_Next" style="overflow: hidden; width: 100%;">',re.S)
# obj3_other = re.compile(r'<script>.*?var currentChannel1 = "(?P<bumen>.*?)"'
#                  r'.*?<p class="wztitle">(?P<name>.*?)</p>'
#                  r'.*?<div class="acadImg"><img src="(?P<imgul>.*?)".*?<div class="acadTxt">.*>(?P<style>.*?)家'
#                  r'，(?P<work>.*?)。.*?<div class="Previous_Next" style="overflow: hidden; width: 100%;">'
#                  ,re.S)
# Variant for a page whose biography embeds the workplace inside an extra
# tag and phrases the election year as "...当选" — presumably tailored to
# one specific layout (used for page #268 in the main loop); verify against
# the live page if the site changes.
obj3_sqecial_1 = re.compile(r'<script>.*?var currentChannel1 = "(?P<bumen>.*?)"'
                 r'.*?<p class="wztitle">(?P<name>.*?)</p>'
                 r'.*?<div class="acadImg"><img src="(?P<imgul>.*?)".*?<div class="acadTxt">.*>(?P<style>.*?)家'
                 r'，.*?>(?P<work>.*?)<.*。(?P<born>.*?)<.*?>生于(?P<home>.*?)</span>。.*?。(?P<year>.*?)当选.*?<div class="Previous_Next" style="overflow: hidden; width: 100%;">'
                 ,re.S)

# Primary detail-page pattern that parses profession (style), workplace
# (work), birth date (born), birthplace (home) and year of election (year,
# phrased as "...转为") directly out of the page; tried first in the loop.
obj3_sqecial = re.compile(r'<script>.*?var currentChannel1 = "(?P<bumen>.*?)"'
                 r'.*?<p class="wztitle">(?P<name>.*?)</p>'
                 r'.*?<div class="acadImg"><img src="(?P<imgul>.*?)".*?<div class="acadTxt">.*>(?P<style>.*?)家'
                 r'，(?P<work>.*?)。(?P<born>.*?)生于(?P<home>.*?)。.*?<.*，(?P<year>.*?)转为.*?<div class="Previous_Next" style="overflow: hidden; width: 100%;">'
                 ,re.S)

# obj4 and its numbered variants parse the free-text biography captured by
# obj3's "String" group. Each variant handles a different phrasing of the
# same facts (e.g. "生于" vs "出生" for the birth place, "，" vs "。"
# separators). The main loop tries them in order until one matches.
obj4 = re.compile(r'(?P<style>.*?)家，(?P<work>.*?)。(?P<born>.*?)生于(?P<home>.*?)。.*?。(?P<year>.*?)当选',re.S)

obj4_1=re.compile(r'(?P<style>.*?)家。(?P<work>.*?)。(?P<born>.*?)生于(?P<home>.*?)。.*?。(?P<year>.*?)当选',re.S)

# Note: the empty (?P<style>) group deliberately yields "" for biographies
# that do not open with a "...家" profession phrase.
obj4_2 =re.compile(r'(?P<style>)(?P<work>.*?)。(?P<born>.*?)生于(?P<home>.*?)。.*?。(?P<year>.*?)当选',re.S)

obj4_3 = re.compile(r"(?P<style>.*?)家，(?P<work>.*?)。(?P<born>.*?)出生，(?P<home>.*?)人。.*?。(?P<year>.*?)当选",re.S)

obj4_4 = re.compile(r"(?P<style>.*?)家。(?P<work>.*?)。(?P<born>.*?)出生，(?P<home>.*?)人。.*?。(?P<year>.*?)当选",re.S)

obj4_5 = re.compile(r"(?P<style>.*?)家。(?P<work>.*?)。(?P<born>.*?)出。(?P<home>).*?。(?P<year>.*?)当选",re.S)

obj4_6 = re.compile(r"(?P<style>.*?)家，(?P<work>.*?)。(?P<born>.*?)出生。(?P<home>).*?。(?P<year>.*?)当选",re.S)

obj4_7 = re.compile(r"(?P<style>.*?)家，(?P<work>.*?)。(?P<born>.*?)生于(?P<home>.*?)。(?P<year>.*?)当选",re.S)

obj4_8 = re.compile(r"(?P<style>.*?)家，(?P<work>.*?)。(?P<born>.*?)生，(?P<home>.*?)人。(?P<year>.*?)当选",re.S)

obj4_9 = re.compile(r"(?P<style>.*?)家，(?P<born>.*?)出生，(?P<work>.*?)。(?P<home>.*?)人。.*?。(?P<year>.*?)当选",re.S)

obj4_10 = re.compile(r"(?P<style>.*?)家，(?P<work>.*?)，(?P<born>.*?)生于(?P<home>.*?)。.*?。(?P<year>.*?)当选",re.S)





# Collect the URLs of every academician detail page from the index page.
result1 = obj1.finditer(resp.text)
child_href_list = []  # detail-page URLs, in page order
for it in result1:
    ul = it.group("ul")
    for itt in obj2.finditer(ul):
        child_href_list.append(itt.group("href"))

flag = 0  # 1-based counter of detail pages visited (used by the main loop)
# Visit each child page, then parse its HTML source.
# The CSV output is intended for later analysis (e.g. with pandas).
# newline="" is required by the csv module to avoid blank rows on Windows.
f = open("Test.csv", mode="w", encoding="utf-8", newline="")
csvwriter = csv.writer(f)

# Fallback chain for parsing the free-text biography; tried in order.
BIO_PATTERNS = (obj4, obj4_1, obj4_2, obj4_3, obj4_4, obj4_5,
                obj4_6, obj4_7, obj4_8, obj4_9, obj4_10)

for href in child_href_list:
    child_resp = requests.get(href)
    child_resp.encoding = "utf-8"  # force UTF-8 so Chinese text is not garbled
    flag = flag + 1

    # Page #268 uses a slightly different layout; all others try the
    # primary structured pattern first.
    primary = obj3_sqecial_1 if flag == 268 else obj3_sqecial
    resultT = primary.search(child_resp.text)
    if resultT is not None:
        # The structured pattern captured every field directly.
        csvwriter.writerow(resultT.groupdict().values())
        print("写入成功" + "No.", flag)
        continue

    # Fall back to the generic pattern, which captures the raw biography
    # text in the "String" group for further parsing.
    resultT = obj3.search(child_resp.text)
    if resultT is None:
        continue  # page layout not recognized at all; skip it

    # Try each biography phrasing variant until one matches.
    bio = resultT.group("String")
    result_string = None
    for pattern in BIO_PATTERNS:
        result_string = pattern.search(bio)
        if result_string is not None:
            break
    if result_string is None:
        continue  # no variant matched; skip instead of crashing on None

    # Merge the page-level fields with the parsed biography fields.
    # BUG FIX: the original updated a temporary dict returned by
    # groupdict() and discarded it, so the parsed fields never reached
    # the CSV. Keep one dict and update it.
    dic = resultT.groupdict()
    dic.update(result_string.groupdict())
    csvwriter.writerow(dic.values())
    print("写入成功" + "No.", flag)

f.close()
print("over")



