# 先运行 0812打开edge-sci登录【模板】【端口9222】
# 手输保存地址、下载地址【保存文件名71字符+】
# 0812selenium+bs4【端口9222】爬取sci-模板1.py
from time import sleep
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import keyboard
# import pymouse
from bs4 import BeautifulSoup
import re
import pyautogui
import PySimpleGUI as sg
import openpyxl
import sys,traceback
from _getpaper_zh_pdf_1 import getpaper_zh

from selenium.webdriver.edge.options import Options# Edge launch options (can also enable headless mode)
options = Options()# options object used to attach to a running browser
# options.add_argument("headless")# would run without a visible window (headless browser)
# Used only on the very first launch:
# options.add_argument("--remote-debugging-port=9222")
# Normal runs: attach to the Edge instance already listening on port 9222
# (started beforehand by the login script), reusing its logged-in session.
options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
driver= webdriver.Edge(options = options)


# foldername=input("C:\\Users\\King\\Desktop\\sci\\输入子文件夹名称：")
# 1、单文献下载
# url=input("请输入文献地址：")
# 2、列表法
# url_list=["https://www.sciencedirect.com/science/article/pii/S0021967323004168",
# "https://www.sciencedirect.com/science/article/pii/S0030401823001177"]
# for url in url_list:
# #     url=list[i]
#     print(url)
#     get_paper(foldername,url)

def show_xlsx_data(file_path):
    """Load the active sheet of an .xlsx workbook into the GUI table.

    Parameters
    ----------
    file_path : str
        Path to the .xlsx file chosen in the GUI file browser.

    Reads every row of the active worksheet into a list of row lists and
    pushes it into the window's '-TABLE-' element.  Relies on the
    module-level ``window`` created below.
    """
    wb = openpyxl.load_workbook(file_path)#selected file path
    sheet = wb.active
    # values_only=False keeps Cell objects and .value extracts the data;
    # the author found values_only=True failed to read numeric cells.
    # (Dead `data = []` pre-assignment and the unused `headings = data[0]`
    # — which crashed on an empty sheet — were removed.)
    data = [[cell.value for cell in row] for row in sheet.iter_rows(values_only=False)]
    window['-TABLE-'].update(values=data)
    
def get_paper(foldername,url):
    """Scrape one ScienceDirect article page into a UTF-8 text file.

    Opens *url* in the already-attached Edge browser, parses the rendered
    page with BeautifulSoup and writes title, journal info, authors, DOI
    link, abstract, keywords, body sections, references and figure
    links/captions to ``<foldername>\\<title[:70]>_en.txt``.

    Parameters
    ----------
    foldername : str
        Directory for the output file; created if it does not exist.
    url : str
        Full article URL on sciencedirect.com.

    Returns
    -------
    None.  If the title cannot be located, the URL is appended to an
    error log on the desktop and the function returns early.  (The
    original fell through here and crashed on the undefined title
    variable / closed error-file handle.)

    Relies on the module-level ``driver`` (Selenium Edge session) and on
    pyautogui, so the browser window must be visible and focused.
    """
    print("开始爬"+url)
    driver.get(url)
    # First snapshot of the page; the body and figures are re-parsed later,
    # after scrolling, because they are rendered lazily.
    html_text=driver.page_source
    soup=BeautifulSoup(html_text, 'html.parser')
    # Title — also used to build the output file name.
    try:
        vvv=soup.find('span',class_="title-text").text
        print("标题："+vvv)
        # Strip characters that are illegal in Windows file names.
        vvv1=vvv.replace("/","").replace(":","")
        subfolder=foldername  # save directory chosen in the GUI
        if not os.path.exists(subfolder):
            os.makedirs(subfolder)
        # Keep at most the first 70 title characters for the file name.
        docfile=subfolder+"\\"+vvv1[:70]+'_en.txt'
        f=open(docfile,"w",encoding="utf-8")
    except Exception:
        # Title not found (page blocked / layout changed): log the URL to
        # the desktop error file and bail out of this article.
        print("出错,网址写入桌面.出错.txt")
        sleep(5)
        f=open('C:\\Users\\hp\\Desktop\\出错.txt',"a+",encoding="utf-8")
        f.write(url+"\n")
        f.close()
        return

    # --- header metadata: title, journal, authors, DOI, abstract, keywords ---
    f.write("标题: "+vvv+'\n')
    # Journal information
    if soup.find("div",class_="publication-volume u-text-center") is not None:
        jourinfos=soup.find("div",class_="publication-volume u-text-center").text.strip()
        print("期刊信息：正在输出")
        f.write("期刊信息: "+jourinfos)
        f.write("\n")
    else:
        f.write("期刊信息空")
        f.write("\n")

    # Authors
    if soup.find("div",class_="author-group") is not None:
        autinfos=soup.find("div",class_="author-group").text.strip()
        # Drop the boilerplate label ScienceDirect injects before the names.
        autinfos=autinfos.replace("Author links open overlay panel",'')
        print("作者: 正在输出")
        f.write("作者: "+autinfos)
        f.write("\n")
    else:
        f.write("作者空")
        f.write("\n")
    # DOI link
    if soup.find('a',class_="anchor doi anchor-default") is not None:
        doi=soup.find('a',class_="anchor doi anchor-default").attrs['href']
        print("doi链接："+doi)
        f.write("doi链接："+doi)
        f.write("\n")
    else:
        f.write("doi链接空")
        f.write("\n")
    # Abstract — first <p> of the abstract block.
    infos=soup.find("div",class_="abstract author")
    if infos is not None and infos.find("p") is not None:
        abstract=infos.find("p").text.strip()
        print("摘要: 正在输出")
        f.write("摘要: "+abstract)
        f.write("\n")
    else:
        f.write("摘要空")
        f.write("\n")

    # Keywords — one <span> per keyword inside the keywords section.
    keyword_div=soup.find("div",class_="keywords-section")
    if keyword_div is not None and keyword_div.find("span") is not None:
        keyw="关键词: "
        for i in keyword_div.find_all("span"):
            keyw=keyw+i.text+";"
        print("关键词 正在输出")
        f.write(keyw)
        f.write("\n")
    else:
        f.write("关键词空")
        f.write("\n")

    # Click into the page and page-down repeatedly so the lazily rendered
    # body is loaded before re-reading the page source.
    pyautogui.moveTo(100,250)
    pyautogui.click()
    sleep(2)
    for _ in range(5):
        pyautogui.press("pagedown")
        pyautogui.press("pagedown")
    sleep(5)

    # --- body text: re-parse the expanded page ---
    html_text=driver.page_source
    soup=BeautifulSoup(html_text, 'html.parser')
    try:
        zhengwen=soup.find("div",class_="Body u-font-gulliver text-s")
        # find_all() returns a (possibly empty) list, never None, so test
        # truthiness (the original `!= None` check was always true).
        sections=zhengwen.find_all('section')
        if sections:
            for zhengwen1 in sections:
                print('节'+zhengwen1.get('id'))  # e.g. sec0001 ... ack0001
                h2=zhengwen1.find("h2",class_="u-h4 u-margin-l-top u-margin-xs-bottom")
                if h2 is not None:
                    # Iterating the heading Tag yields its children.
                    for zhengwen1title in h2:
                        print('节标题：'+zhengwen1title.text)
                        f.write('节标题：'+zhengwen1title.text+'\n')
                    # Real body paragraphs carry an id attribute.
                    for zhengwen1para in zhengwen1.find_all('p',attrs={'id':True}):
                        f.write(zhengwen1para.text+'\n')
        else:
            print("section 空")
    except Exception as e:
        print(e)
        print("爬正文，节，段出错")

    # --- references: jump to page end so the list renders, then re-parse ---
    pyautogui.press("end")
    pyautogui.press("end")
    sleep(3)
    html_text=driver.page_source
    soup=BeautifulSoup(html_text, 'html.parser')
    if soup.find("ol",class_="references") is not None:
        refe=soup.find("ol",class_="references").text
        # Strip the link labels ScienceDirect inserts between references;
        # "Google Scholar" doubles as the per-entry separator.
        refe=refe.replace("Google Scholar","\n")
        refe=refe.replace("View PDF","")
        refe=refe.replace("CrossRef","")
        refe=refe.replace("View in Scopus","")
        print("参考文献：正在输出")
        f.write("参考文献: "+"\n"+refe)
        f.write("\n")
    else:
        print("参考文献空")
        f.write("参考文献空")
        f.write("\n")

    # --- figures: alt text + image URL, followed by the caption ---
    try:
        if soup.find("div",class_="Body u-font-gulliver text-s") is not None:
            zhengwen=soup.find("div",class_="Body u-font-gulliver text-s")
            for tu in zhengwen.find_all('figure',class_="figure text-xs"):
                # <figure><span><img>: record alt text and src of each image.
                # (A "_lrg" suffix in the file name marks the hi-res copy.)
                for i in tu.find_all("img",attrs={'src':True}):
                    f.write('图：'+i.get('alt')+'   '+i.get('src')+'\n')
                # The caption is usually a single span.
                for tutitle in tu.find_all('span',class_='captions text-s'):
                    f.write(tutitle.text+'\n')
            print("正在爬图")
        else:
            print("图空")
    except Exception as e:
        print(e)
        print("爬图出错")

    try:
        f.close()
    except Exception as e:
        print(e)
        print("关闭文件出错")

    print("*********下载完成*********")
        
        
        
        
# GUI layout — each inner list is one row of the window.
layout = [
#     [sg.FileBrowse(file_types=(("TXT Files", "*.txt"), ("ALL Files", "*.*")))],
    # NOTE(review): buttons labelled with a trailing 'X' ('初始化X', '单篇下载txtX')
    # do not match their event strings in the loop below, so those branches
    # never fire — they look deliberately disabled; confirm before "fixing".
    [sg.Button('初始化X'),sg.Button('清空')],
    [sg.Text('文献地址: '), sg.Input(default_text='sci地址',size=(50,1),key='-URL-'),sg.Button('添加至列表')],
    [sg.Text('选择 xlsx 文件：'),
    sg.Input(key='-FILE-', enable_events=True), sg.FileBrowse('选择'),sg.Button('可多行添加至列表')],
    # Table filled from the selected xlsx file; multi-row selection enabled.
    [sg.Table(values=[[' ',' ',' ']], headings=['A    ','B                                     ','C                      '], max_col_width=40,key='-TABLE-', enable_events=True, select_mode=sg.TABLE_SELECT_MODE_EXTENDED)],


    [sg.Button('单篇下载txtX'),sg.Button('多篇下载txt'),sg.Button('多篇下载zh.pdf(保存目录由9222决定)')],
    # Save-directory input; FolderBrowse must sit in the same row to bind to it.
    [sg.Text('保存目录'),sg.Input(default_text='C:\\Users\\hp\\Desktop\\temp\\',size=(60,1),key='-FOLDER-'), sg.FolderBrowse()],
    # Read-only multiline that echoes the queued URL list.
    [sg.Multiline(size=(80, 5), key='-URLLIST-', disabled=True, autoscroll=True)],
]
# Create the window and keep it always on top.
window = sg.Window('sciencedirect.com 【具体文献】爬链接+标题+摘要txt【】桌面\\sci', layout)
window.Finalize()
window.TKroot.attributes('-topmost', 1)

paperurl_list=[]  # URLs queued for batch download
output_text=""    # mirror of the '-URLLIST-' multiline contents
# Main event loop: one iteration per GUI event.
while True:
    event, values = window.read()
    if event == sg.WINDOW_CLOSED:
        break
        quit()  # unreachable — the break above always fires first
    if event == '初始化':
        # NOTE(review): the button is labelled '初始化X' (see layout), so this
        # branch can never fire as written — appears deliberately disabled; confirm.
        # Not wrapped in a function: the author notes doing so closed the browser.
        driver = webdriver.Edge()
        # driver.maximize_window()
        driver.set_window_size(800,750) # browser window size in pixels
        url0="https://www.sciencedirect.com/science/article/pii/S1674200123001293"
        driver.get(url0)#

        # Click the "sign in via your institution" button.
        login_way_btn = driver.find_element(By.XPATH,'//*[text()="通过您的机构登录"]')
        login_way_btn.click()
        print("其他机构 click  xpath成功")
        sleep(1)
        # Type the institution name into the search box.
        driver.find_element(by=By.ID,value='bdd-email').send_keys("east china university of science and technology")
        print("输入ecust  成功")
        sleep(5)
        # Pick the suggestion via a raw mouse click (no stable DOM element found);
        # the screen coordinates assume the fixed window position/size set above.
        pyautogui.moveTo(424,680)
        pyautogui.click()
        print("ECUST选项 click  鼠标成功")
        sleep(2)

        try:
            login_btn=driver.find_element(By.CLASS_NAME,'els-container-right')# often not found
            login_btn.click()
            print("机构登录 By.CLASS_NAME成功")
        except :
            print("机构登录 By.CLASS_NAME找不到")
            # Fallback: click the institution-login button by screen position.
            pyautogui.moveTo(300,360)
            pyautogui.click()
            print("机构登录通过ECUST 鼠标成功")

        sleep(2)
        # Institution credentials.
        # SECURITY(review): username/password are hard-coded in plain text —
        # move them to environment variables or an interactive prompt.
        driver.find_element(by=By.ID,value='username').send_keys('Y20170138')
        driver.find_element(by=By.ID,value='password').send_keys('289a289bAA')
        # Exactly one login method may be used (enter key / auto-find / mouse).
        try:
            # By.CLASS_NAME cannot match a compound class string, so this raises.
            login_user_btn=driver.find_element(By.CLASS_NAME,'form-element form-button')
            login_user_btn.click()
            print("user pass登录按钮  By.CLASS_NAME失败")# NOTE: message says "failed" even on this success path
        except :
            print("user pass登录按钮  By.CLASS_NAME失败")# indeed never found
            pyautogui.moveTo(230,655)
            pyautogui.click()
            print("user pass登录按钮  鼠标成功")
            sleep(1)

        keyboard.press_and_release('pagedown')
        print("下翻")
        sleep(1)
        # Accept button: <input type="submit" name="_eventId_proceed" value="Accept">
        try:
            login_user_btn=driver.find_element(By.XPATH,'/html/body/form/div/div[2]/p[2]/input[2]')
            login_user_btn.click()#automatic
            print("accept按钮  By.XPATH成功")
        except :
            print("accept按钮  By.XPATH失败")
            pyautogui.moveTo(444,694)
            pyautogui.click()
            pyautogui.click()
            print("accept按钮  鼠标成功")
        sleep(5)#
        print("完成进入sci")
        sg.popup('初始化完成！')
    if event =='添加至列表':
        # Append the single URL from the input box to the queue and echo it.
        paperurl_list.append(values['-URL-'])
        print(paperurl_list)
        output_text=output_text+values['-URL-']+"\n"
        window['-URLLIST-'].update(output_text)
    if event =='清空':
        # Clear both the visible list and the queue.
        window['-URLLIST-'].update('')
        paperurl_list=[]
        output_text=""
        print(paperurl_list)
    if event == '单篇下载txt':
        # NOTE(review): button is labelled '单篇下载txtX' — branch never fires; confirm.
        try:
            foldername=values['-FOLDER-']
            url=values['-URL-']
            get_paper(foldername,url)
            sg.popup("下载完成")
        except:
            print("单篇下载出错")
            pass
    if event == '多篇下载txt':# download every URL queued in paperurl_list
        try:
            foldername=values['-FOLDER-']
            for i in paperurl_list:
                get_paper(foldername,i)
        except Exception as e:
            print("多篇下载出错")
            print(e)
            pass
    if event == '多篇下载zh.pdf(保存目录由9222决定)':# Chinese PDFs via the helper; save dir set by the 9222 browser
        try:
            foldername=values['-FOLDER-']
            for i in paperurl_list:
                getpaper_zh(i)# the helper saves to the desktop
        except Exception as e:
            print("多篇下载出错")
            print(e)
            pass
    if event == '-FILE-':
        file_path = values['-FILE-']
        if file_path.lower().endswith('.xlsx'):
            show_xlsx_data(file_path)
        else:
            sg.popup('请选择一个 xlsx 文件！')
            window['-FILE-'].update('')

    if event == '-TABLE-':# row click: queue column B (index 1) of each selected row
        try:
            selected_rows = values['-TABLE-']
            selected_data = [window['-TABLE-'].get()[i] for i in selected_rows]# data of the selected rows
            # presumably column index 1 holds the article URL — verify against the xlsx.
            for row in selected_data:
                paperurl_list.append(row[1])# clicking a row queues it automatically
        except Exception as e:
            print(e)
    if event =='可多行添加至列表':
        # De-duplicate the queue (preserving order) and re-render it.
        output_text=""
        res = []
        [res.append(i) for i in paperurl_list if i not in res]
        paperurl_list=res
        print(paperurl_list)
        for i in paperurl_list:
            output_text=output_text+i+"\n"
        window['-URLLIST-'].update(output_text)
# Close the window (the attached 9222 browser stays open).
window.close()
# m=pymouse.PyMouse()#【鼠标位置】
# options = webdriver.EdgeOptions()
# # 此步骤很重要，设置为开发者模式，防止被各大网站识别出来使用了Selenium
# driver = webdriver.Edge(options=options)
# driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            # "source": """
            # Object.defineProperty(navigator, 'webdriver', {
              # get: () => undefined
            # })
            # """
        # })
