import time
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys

import pandas as pd
import os

# Launch the Chrome instance shared by all scraping code in this module.
# Raw string: the original "DRIVER\chromedriver.exe" relied on Python
# keeping the invalid escape "\c" literal (a DeprecationWarning, and a
# SyntaxWarning from 3.12); r"..." yields the identical path explicitly.
browser = webdriver.Chrome(r"DRIVER\chromedriver.exe")

# To run Chrome headless (no visible window) instead, create the driver
# with these options:
# chrome_options = Options()
# chrome_options.add_argument('--headless')
# browser = webdriver.Chrome(r"DRIVER\chromedriver.exe", options=chrome_options)
#
# Quick smoke test of headless mode:
# browser.get('https://baidu.com/')
# browser.save_screenshot("no_windows.png")
# print(browser.page_source)

# Search the thesis database by supervisor ("导师") name.
def Search_By_Tutor(tutor):
    """Scrape wanfangdata.com.cn for all theses supervised by *tutor*.

    Opens the thesis search page, queries for the supervisor, switches
    the result list to 50 items per page, walks every result page, and
    writes one row per thesis -- title, author, organization, degree,
    major, tutor, year, language ("中文"), URL -- to
    THEIES/THEIES_<tutor>.csv (GBK-encoded, no header, no index).

    Returns nothing. If the result-count element is absent (the search
    produced no results page), the function silently does nothing.

    Relies on the module-level `browser` webdriver and on the site's
    absolute XPaths, so it is brittle against page-layout changes.
    """
    browser.get('http://s.wanfangdata.com.cn/thesis')
    time.sleep(0.2)
    # Search input box XPath: /html/body/div[3]/div/div[1]/div[1]/div/div/div[1]/div[2]/input
    input_text = browser.find_element_by_xpath("/html/body/div[3]/div/div[1]/div[1]/div/div/div[1]/div[2]/input")
    input_text.send_keys("导师：{}".format(tutor))
    # Search button XPath: /html/body/div[3]/div/div[1]/div[1]/div/div/div[1]/div[2]/div/div[2]
    B = browser.find_element_by_xpath("/html/body/div[3]/div/div[1]/div[1]/div/div/div[1]/div[2]/div/div[2]")
    browser.execute_script("arguments[0].click();", B) # JS click: a direct .click() fails, apparently because another element overlays the button
    
    # Total number of matching theses. find_elements (plural) is used so
    # an empty list can signal "no results page" without raising.
    Theies_num = browser.find_elements_by_xpath("/html/body/div[3]/div/div[2]/div[2]/div/div[2]/div[2]/div[2]/span[2]/span[2]")
    if len(Theies_num)==1:
        num = int(browser.find_element_by_xpath("/html/body/div[3]/div/div[2]/div[2]/div/div[2]/div[2]/div[2]/span[2]/span[2]").text)

        # Switch the page-size selector so each page shows 50 records.
        B1 = browser.find_element_by_xpath("/html/body/div[3]/div/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[5]/div[2]/div[1]/span")
        browser.execute_script("arguments[0].click();", B1) # JS click again: the control is overlaid
        B2 = browser.find_element_by_xpath("/html/body/div[3]/div/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[5]/div[2]/div[2]/div[3]")
        browser.execute_script("arguments[0].click();", B2) # JS click again: the control is overlaid
        time.sleep(0.2)
        # Result-link XPath, kept for reference:
        # /html/body/div[3]/div/div[2]/div[2]/div/div[2]/div[2]/div[3]/div[1]/div[1]/div[2]/a
        
        # Locate every result row first, then pull each field out of the row.
        DIV = browser.find_elements_by_class_name("normal-list")
        '''论文标题'''
        THEIES = []        # thesis titles
        '''学位 DEGREE'''
        DEGREE = []        # degree (e.g. master's / doctoral)
        '''作者 AUTHOR'''
        AUTHOR = []        # thesis authors
        '''主修专业 MAJOR'''
        MAJOR = []         # majors
        '''学校组织 ORGANIZATION'''
        ORGANIZATION = []  # school / organization
        '''发表年份 YEAR'''
        YEAR = []          # publication years
        '''论文网址 URL'''
        URL = []           # thesis URLs
        # First result page: keep every row, recording "" for missing fields.
        for div in DIV:
            thesis = div.find_element_by_class_name("ajust").find_element_by_tag_name("a")# "div.ajust > a"
            THEIES.append(thesis.text)
            degree = div.find_element_by_class_name("essay-type")# "essay-type"
            # Drops the last two characters of the degree label
            # (presumably the "论文" suffix) -- TODO confirm against the live page.
            DEGREE.append((degree.text)[:-2]) 
            
            try:
                author = div.find_element_by_class_name("authors")# "authors"
                AUTHOR.append(author.text)
            except Exception:
                AUTHOR.append("")

            major = div.find_element_by_xpath("./div[2]/span[3]")# "div.author-area > span:nth-child(3)"
            MAJOR.append(major.text)
            organization = div.find_element_by_xpath("./div[2]/span[4]/span[1]")# "div.author-area > span.org > span:nth-child(1)"
            ORGANIZATION.append(organization.text)
                
            try:
                year = div.find_element_by_xpath("./div[2]/span[4]/span[2]")# "div.author-area > span.org > span:nth-child(2)"
                YEAR.append(year.text)
            except Exception:
                YEAR.append("")
                
            URL.append(thesis.get_attribute('href')) 

        # "Next page" control (CSS selector: span.next).
        next_page = browser.find_element_by_css_selector("span.next")
        i = 0
        num1 = num
        # i counts next-page clicks. With 50 rows per page there are at
        # most num1//50 clicks; the second test stops one click early
        # when the count is an exact multiple of 50 (last page already full).
        while (i < num1//50) and (num1 != (i+1)*50):
            next_page.click()
            time.sleep(0.3)

            # Same row-then-field extraction as the first page.
            DIV1 = browser.find_elements_by_class_name("normal-list")
            for div1 in DIV1:
                
                try:
                    author1 = div1.find_element_by_class_name("authors")# "authors"
                    AUTHOR.append(author1.text)
                except Exception:
                    # NOTE: unlike the first page, rows without an author are
                    # dropped entirely here; num is shrunk so the final row
                    # count still matches the collected lists.
                    num = num - 1
                    continue  # no author, so no record is needed
                
                thesis1 = div1.find_element_by_class_name("ajust").find_element_by_tag_name("a")# "div.ajust > a"
                THEIES.append(thesis1.text)
                degree1 = div1.find_element_by_class_name("essay-type")# "essay-type"
                # Same 2-char suffix strip as on the first page.
                DEGREE.append((degree1.text)[:-2])  

                major1 = div1.find_element_by_xpath("./div[2]/span[3]")# "div.author-area > span:nth-child(3)"
                MAJOR.append(major1.text)
                organization1 = div1.find_element_by_xpath("./div[2]/span[4]/span[1]")# "div.author-area > span.org > span:nth-child(1)"
                ORGANIZATION.append(organization1.text)
                
                try:
                    year1 = div1.find_element_by_xpath("./div[2]/span[4]/span[2]")# "div.author-area > span.org > span:nth-child(2)"
                    YEAR.append(year1.text)
                except Exception:
                    YEAR.append("")
                
                URL.append(thesis1.get_attribute('href'))
            i += 1
        # print(len(THEIES))
        # print(THEIES[0]+"\n"+THEIES[60]+"\n"+THEIES[75])
        # print(len(DEGREE))
        # print(DEGREE[0]+"\n"+DEGREE[60]+"\n"+DEGREE[75])
        # print(len(AUTHOR))
        # print(AUTHOR[0]+"\n"+AUTHOR[60]+"\n"+AUTHOR[75])
        # print(len(MAJOR))
        # print(MAJOR[0]+"\n"+MAJOR[60]+"\n"+MAJOR[75])
        # print(len(ORGANIZATION))
        # print(ORGANIZATION[0]+"\n"+ORGANIZATION[60]+"\n"+ORGANIZATION[75])
        # print(len(YEAR))
        # print(YEAR[0]+"\n"+YEAR[60]+"\n"+YEAR[75])
        # print(len(URL))
        # print(URL[0]+"\n"+URL[60]+"\n"+URL[75])
    
        # Assemble the 9-column rows and save them as a per-tutor CSV.
        # Columns: title, author, organization, degree, major, tutor,
        # year, language, URL.
        DATA = [ [0] * 9 for i in range(num)]
        for row in range(num):
            DATA[row][0] = THEIES[row]
            DATA[row][1] = AUTHOR[row]     
            DATA[row][2] = ORGANIZATION[row]
            DATA[row][3] = DEGREE[row]
            DATA[row][4] = MAJOR[row]
            DATA[row][5] = tutor
            DATA[row][6] = YEAR[row]
            DATA[row][7] = "中文"
            DATA[row][8] = URL[row]
        test=pd.DataFrame(data=DATA)
        test.to_csv('THEIES\THEIES_{}.csv'.format(tutor),encoding='gbk', index=False, header=False,errors='ignore')

if __name__ == "__main__":
    print("start.......")
    # 院士名单 name_list
    f = open("中国两院院士名单.txt","r",encoding="UTF-8")
    sourceInLine=f.readlines()
    name_list=[]
    for line in sourceInLine:
        temp1=line.strip('\n') #去除首尾 "\n"
        # temp2=temp1.split('\t')
        name_list.append(temp1)
    # print(len(name_list))
    # print(name_list[70])
    
    # 按照导师姓名查找论文数据
    # Search_By_Tutor("张金麟")
    # Search_By_Tutor("任洪强")
    # Search_By_Tutor("卢耀如")
    # Search_By_Tutor("张建民")
    # Search_By_Tutor("梁骏吾")
    # Search_By_Tutor("邓建军")
    # Search_By_Tutor("林国强")
    # Search_By_Tutor("李明")
    for tutor_name in name_list:
        try:
            Search_By_Tutor(tutor_name)
        except Exception:
            Search_By_Tutor(tutor_name)

    # Search_By_Tutor("李永舫")

    
    # 合并全部csv文件
    path = './THEIES/'
    files = os.listdir(path)
    # pd.read_csv('./THEIES/THEIES_赵振东.csv',encoding='gb18030')
    for inputfile in files:
        # print(inputfile)
        df = pd.read_csv(path+inputfile,encoding='gb18030')
        df.to_csv('THEIES_ALL.csv', mode='a', index=False)

    # 关闭浏览器
    browser.quit()
    print("end.........")







