
import os
import random
import time
from itertools import zip_longest

import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By

# NOTE(review): chdir so Selenium can locate the ChromeDriver binary placed
# in this directory. You must download the ChromeDriver matching your Chrome
# version and put it here (or adjust the path); if startup fails, searching
# the error message online yields setup guides. (Original comment in Chinese.)
os.chdir(r'C:\Python39\Lib\site-packages\selenium\webdriver\chrome')

def crawler_paper(guanjianci, yeshu):
    """Scrape paper metadata from CNKI's old search interface.

    Parameters
    ----------
    guanjianci : str
        Search keyword typed into the CNKI search box.
    yeshu : int
        Exclusive upper bound of the page counter: pages 1 .. yeshu-1
        are scraped (the caller passes desired_pages + 1).

    Returns
    -------
    list[list]
        One row per paper:
        [title, authors, source, date, database, download_count].
        download_count is 0 when the page shows no count for a row.
    """
    opt = webdriver.ChromeOptions()
    # Hide the "controlled by automated software" banner; also reduces
    # the chance of trivial bot detection.
    opt.add_experimental_option('excludeSwitches', ['enable-automation'])
    driver = webdriver.Chrome(options=opt)
    df = []
    try:
        driver.get('https://www.cnki.net/old/')
        time.sleep(2)
        # Selenium 4 removed find_element_by_*; use find_element(By..., ...).
        driver.find_element(By.ID, 'txt_SearchText').send_keys(guanjianci)
        driver.find_element(By.CLASS_NAME, 'search-btn').click()
        time.sleep(3)
        # The result table is rendered inside an iframe.
        driver.switch_to.frame('iframeResult')
        for page in range(1, yeshu):
            try:
                driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')
                paper_name = driver.find_elements(By.CSS_SELECTOR, 'a.fz14')
                author = driver.find_elements(By.CSS_SELECTOR, 'td.author_flag')
                source = driver.find_elements(By.CSS_SELECTOR, 'td.author_flag+td')
                datetime = driver.find_elements(By.CSS_SELECTOR, 'td.author_flag+td+td')
                database = driver.find_elements(By.CSS_SELECTOR, 'td.author_flag+td+td+td')
                download = driver.find_elements(By.CSS_SELECTOR, 'span.downloadCount')
                # Some rows have no download-count span, so the lists differ in
                # length; zip_longest pads the missing cells with 0.
                for k, v, m, n, q, w in zip_longest(paper_name, author, source, datetime, database, download, fillvalue=0):
                    count = w if w == 0 else w.text
                    df.append([k.text, v.text, m.text, n.text, q.text, count])
                print('第{}页爬取完毕'.format(page))
                driver.find_element(By.LINK_TEXT, '下一页').click()
                # Randomized delay between pages to throttle requests.
                time.sleep(random.randint(2, 5))
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C still works.
                # Best-effort: report, back off, and try the next iteration.
                print('未爬到数据')
                time.sleep(10)
    finally:
        # Always release the browser, even when scraping fails part-way
        # (the original leaked the Chrome process on every run).
        driver.quit()
    return df


def save_data(data: list, gjc, outputdir=r'C:\Users\lenovo\Desktop\爬论文\知网科研'):
    """Save scraped rows to ``<outputdir>/<gjc>.csv``.

    Parameters
    ----------
    data : list
        Rows as produced by ``crawler_paper``: each item is
        [title, authors, source, date, database, download_count].
    gjc : str
        Search keyword; used as the CSV file name.
    outputdir : str, optional
        Directory to write into. Defaults to the original hard-coded
        path for backward compatibility; pass your own path on other
        machines. The directory is created if it does not exist.
    """
    inf = pd.DataFrame(data, columns=['论文名', '作者', '来源', '发表日期', '数据库', '下载次数'])
    # Create the target directory up front so to_csv does not fail with
    # FileNotFoundError on a fresh machine.
    os.makedirs(outputdir, exist_ok=True)
    outputpath = os.path.join(outputdir, '{}.csv'.format(gjc))
    inf.to_csv(outputpath, sep=',', index=False, header=True, encoding='UTF-8')

if __name__ == '__main__':
    # Interactive entry point: ask for a keyword and a page count,
    # scrape CNKI, then dump the results to a CSV named after the keyword.
    keyword = input("请输入你要爬取的关键词：")
    page_count = int(input("请输入你要爬取的页数："))
    # crawler_paper treats its second argument as an exclusive bound,
    # so pass page_count + 1 to scrape exactly page_count pages.
    rows = crawler_paper(keyword, page_count + 1)
    save_data(rows, gjc=keyword)
