# 导出txt+默认导出xlsx；只导出xlsx【页数不成超出总数！！！】
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys#模拟键盘操作事件
from selenium.webdriver.common.by import By

# 注意【先升级pip install --upgrade pip[否则安装pyuserinput==0.1.10无法自动安装pyhook]】
# 注意【pymouse==1.0[在pymouse库的初始化方法中第92行的from windows import PyMouse,PyMouseEvent改为from pymouse.windows import PyMouse,PyMouseEvent。]】
# 注意【pyuserinput==0.1.10[自动安装hook]】
# 修改【_edge_sci_url_title_abstr_3_1.py】统一在e:\\sci【修改文件保存路径*2处；桌面新建sci文件夹】
# 修改【_edge_sci_url_title_abstr_3_1.py修改右键翻译的单击位置；右键往下次数，edge>115,下7】

import keyboard
import pymouse
# import pyperclip#【即可】
from bs4 import BeautifulSoup
import pyautogui
# import re
# import requests
import openpyxl
# import sys


# key=input("输入英文关键词：")
# page=input("爬第几页：")
# url="https://www.sciencedirect.com/search?qs=microfluid%20sphere&show=25&offset=25"

def sci(offset, key, page, onlytoxlsx=False):
    """Scrape one page of ScienceDirect search results with Selenium/Edge.

    Parameters:
        offset: results per page (the site's ``show`` query parameter).
        key: English search keywords (spaces become %20 in the URL).
        page: 1-based page number; converted with int() for the URL offset.
        onlytoxlsx: False (default) -> expand abstracts, trigger Edge's
            context-menu page translation via mouse/keyboard automation, and
            append num/url/journal/title(en+zh)/abstract(en+zh) records to
            E:\\sci\\<key>_zh.txt.
            True -> skip abstracts/translation and save num/link/title/journal
            to E:\\sci\\<key>_<page>.xlsx.

    Side effects only (opens an Edge window, moves the mouse, sends keys,
    writes files); returns None.
    NOTE(review): the mouse click coordinates and arrow-key counts below
    assume an 800x750 window, 125% desktop scaling and Edge >= 115 — they
    must be re-tuned for other setups.
    """
    num_list = []
    url_list = []
    title_list = []
    abstr_list = []
    qikan_list = []
    titlezh_list = []
    abstrzh_list = []

    driver = webdriver.Edge()
    driver.set_window_size(800, 750)  # pixels; the click position below depends on this
    url = ("https://www.sciencedirect.com/search?qs=" + key.replace(" ", "%20")
           + "&show=" + str(offset) + "&offset=" + str((int(page) - 1) * offset))
    driver.get(url)
    sleep(4)  # Win10 may pop a dialog when Edge launches; allow time (may need a manual click)

    if onlytoxlsx == False:
        # Expand every "Abstract" toggle so the abstract text enters the DOM.
        expandButtons = driver.find_elements(By.XPATH, '//*[text()="Abstract"]')
        for button in expandButtons:
            button.click()
            sleep(0.5)
        sleep(2)

    # Parse the page source AFTER the abstracts have been expanded.
    html_text = driver.page_source
    soup = BeautifulSoup(html_text, 'html.parser')
    # One container per search result, including the rank number.
    xxx = soup.find_all('div', class_="result-item-container u-visited-link")
    for xxx1 in xxx:
        # rank number
        if xxx1.find('div', class_="rank-number u-text-center u-text--").text != "":
            uuu1 = xxx1.find('div', class_="rank-number u-text-center u-text--").text
            print(uuu1)
        else:
            uuu1 = '序号无'

        # article link (site-relative href)
        if xxx1.find('a', class_="anchor result-list-title-link u-font-serif text-s anchor-default").attrs['href'] != "":
            yyy1 = xxx1.find('a', class_="anchor result-list-title-link u-font-serif text-s anchor-default").attrs['href']
            print("https://www.sciencedirect.com" + yyy1)
        else:
            yyy1 = "链接无"

        # title (the first span.anchor-text is the title)
        if xxx1.find('span', class_="anchor-text").text != "":
            zzz1 = xxx1.find('span', class_="anchor-text").text
            print(zzz1)
        else:
            zzz1 = "标题无"

        # journal / date field
        if xxx1.find('span', class_="srctitle-date-fields").text != "":
            aaa1 = xxx1.find('span', class_="srctitle-date-fields").text
            print(aaa1)
        else:
            aaa1 = "期刊无"

        if onlytoxlsx == False:
            # abstract — FIX: find_all() returns a (possibly empty) list and
            # never None, so the old "!= None" test was always true and the
            # "摘要无" fallback was unreachable; test truthiness instead.
            if xxx1.find_all('div', class_="abstract-section"):
                www1 = ""
                for www0 in xxx1.find_all('div', class_="abstract-section"):
                    www1 = www1 + www0.text
                print(www1)
            else:
                www1 = "摘要无"
            num_list.append(uuu1)
            url_list.append("https://www.sciencedirect.com" + yyy1)
            title_list.append(zzz1)
            qikan_list.append(aaa1)
            abstr_list.append(www1)
        else:
            num_list.append(uuu1)
            qikan_list.append(aaa1)
            url_list.append("https://www.sciencedirect.com" + yyy1)
            title_list.append(zzz1)

    if onlytoxlsx == False:
        # Trigger Edge's page translation: right-click inside the page, then
        # walk down the context menu to the "Translate" entry and hit Enter.
        sleep(3)
        pyautogui.press('home')  # keyboard.press_and_release('home') is unreliable on Win10 here
        m = pymouse.PyMouse()
        m.click(200, 200, 2)  # x, y, button=2 (right-click); 800x750 window, 125% desktop scaling
        sleep(2)
        for i in range(7):  # menu position for Edge >= 115 (was 9 presses on Edge 105)
            keyboard.press_and_release('down')
            sleep(0.2)
        keyboard.press_and_release('enter')
        sleep(2)

        # Page through the whole document so the in-view translation completes.
        for i in range(24):
            keyboard.press_and_release('pagedown')
            sleep(1)
        keyboard.press_and_release('end')
        print("3s后获取中文")
        sleep(3)

        # Re-read the DOM, now containing the translated (zh) text.
        htmlzh_text = driver.page_source
        soupzh = BeautifulSoup(htmlzh_text, 'html.parser')
        # Per-result container WITHOUT the rank number this time.
        xxxzh = soupzh.find_all('div', class_="result-item-content")
        for xxxzh1 in xxxzh:
            # translated title — FIX: the fallback used to assign to the typo
            # name "zzzzh", leaving zzzzh1 undefined/stale when a title was missing.
            if xxxzh1.find('span', class_="anchor-text").text != "":
                zzzzh1 = xxxzh1.find('span', class_="anchor-text").text
                print(zzzzh1)
            else:
                zzzzh1 = "标题zh无"

            # translated abstract (same find_all truthiness fix as above)
            if xxxzh1.find_all('div', class_="abstract-section"):
                wwwzh1 = ""
                for wwwzh0 in xxxzh1.find_all('div', class_="abstract-section"):
                    wwwzh1 = wwwzh1 + wwwzh0.text
                print(wwwzh1)
            else:
                wwwzh1 = "摘要zh无"
            titlezh_list.append(zzzzh1)
            abstrzh_list.append(wwwzh1)

        # Append one record per result to the txt file ('a+' keeps old content).
        path = 'E:\\sci\\' + key + '_zh.txt'
        with open(path, 'a+', encoding="utf-8") as f:
            # FIX: the old loop was range(len(titlezh_list[i])) with i not yet
            # defined, so it raised immediately and the bare except swallowed
            # it — iterate over the records actually collected instead.
            for i in range(min(len(num_list), len(titlezh_list))):
                try:
                    f.write("【" + num_list[i] + "】" + url_list[i] + "\n"
                            + qikan_list[i] + "\n" + title_list[i] + "\n"
                            + titlezh_list[i] + "\n" + abstr_list[i] + "\n"
                            + abstrzh_list[i] + "\n" + "\n")
                except (IndexError, TypeError):
                    # Per-record guard so one malformed record doesn't abort the rest.
                    print("出错：第" + str(i + 1) + "篇")

    if onlytoxlsx == True:
        # Export num / link / title / journal to an xlsx workbook.
        workbook = openpyxl.Workbook()
        worksheet = workbook.active
        for idx, (num, qikan, link, title) in enumerate(
                zip(num_list, qikan_list, url_list, title_list), start=1):
            print(link)
            print(title)
            worksheet.cell(row=idx, column=1, value=num)
            worksheet.cell(row=idx, column=2, value=link)
            worksheet.cell(row=idx, column=3, value=title)
            worksheet.cell(row=idx, column=4, value=qikan)
        workbook.save('E:\\sci\\' + key + '_' + str(page) + '.xlsx')

#     sleep(30)
# sci(key,num_list,url_list,title_list,abstr_list,titlezh_list,abstrzh_list)
# sci(25,"microfludic preparation sphere",2,False )#【3超出总数2】【】win10成功】
# sci(25,"molecular sieve",1,True)
# 【*****爬虫失效******】

# sci(25,'molecular sieve',1,onlytoxlsx=False)#0824成功
# if __name__ == "__main__":
#     expand()
#     gettotal()
#     writeout(url_list,title_list,abstr_list)
