from time import sleep
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By

# NOTE: machine-specific settings must be adjusted before running
# (in particular the save path: a "temp" folder on the Desktop)

import keyboard
# import pymouse
from bs4 import BeautifulSoup
import re
import pyautogui
import sys,traceback
import json

from selenium.webdriver.edge.options import Options  # Options lets us configure Edge (e.g. headless / printing flags)
# --- Edge options: silently "print" pages straight to PDF ---
options = Options()
# options.add_argument("headless")  # enable for headless (no visible window) runs
options.add_argument("--remote-debugging-port=9222")

options.add_argument('--enable-print-browser')  # render all content as it would print
options.add_argument('--kiosk-printing')  # auto-confirm the print-preview dialog

# Sticky print-preview settings: select "Save as PDF" instead of a printer.
settings = {
    "recentDestinations": [
        {
            "id": "Save as PDF",
            "origin": "local"
        }
    ],
    "selectedDestinationId": "Save as PDF",
    "version": 2
}
prefs = {
    'printing.print_preview_sticky_settings.appState': json.dumps(settings),
    # Save into <home>\Desktop\temp, derived from the current user's home dir
    # so the script no longer needs editing per machine (was hard-coded per user:
    # King / ytlds / hp).
    'savefile.default_directory': os.path.join(os.path.expanduser('~'), 'Desktop', 'temp'),
}
options.add_experimental_option('prefs', prefs)


driver = webdriver.Edge(options=options)


# --- Open the article page and start institutional (ECUST) sign-in ---
# (Earlier experiments with pymouse and a CDP webdriver-stealth script were
# abandoned; pyautogui handles the clicks Selenium cannot reach.)

# Fixed window size so the hard-coded pyautogui screen coordinates line up.
driver.set_window_size(800, 750)

url0 = "https://www.sciencedirect.com/science/article/pii/S1674200123001293"
driver.get(url0)

# Click the "sign in via your institution" button (Chinese UI label).
driver.find_element(By.XPATH, '//*[text()="通过您的机构登录"]').click()
print("其他机构 click  xpath成功")
sleep(1)

# Type the institution name into the lookup box.
institution_box = driver.find_element(By.ID, 'bdd-email')
institution_box.send_keys("east china university of science and technology")
print("输入ecust  成功")
sleep(5)

# The suggestion dropdown entry is not reachable through Selenium, so pick it
# by screen position (valid only for the 800x750 window set above).
pyautogui.moveTo(424, 680)
pyautogui.click()
print("ECUST选项 click  鼠标成功")

sleep(2)
# Press the institution "continue" button.  Selenium frequently cannot locate
# it, in which case fall back to clicking by screen coordinates.
try:
    login_btn = driver.find_element(By.CLASS_NAME, 'els-container-right')
    login_btn.click()
    print("机构登录 By.CLASS_NAME成功")
except Exception:  # narrowed from a bare except: (NoSuchElementException etc.)
    print("机构登录 By.CLASS_NAME找不到")
    # Coordinate fallback; depends on the fixed 800x750 window size.
    pyautogui.moveTo(300, 360)
    pyautogui.click()
    print("机构登录通过ECUST 鼠标成功")


sleep(5)

# --- Enter ECUST account credentials ---
# SECURITY: credentials are hard-coded in source; they should be moved to
# environment variables or a config file kept out of version control.
driver.find_element(By.ID, 'username').send_keys('Y20170138')
driver.find_element(By.ID, 'password').send_keys('289a289bAA')

# Press the login button.  By.CLASS_NAME cannot take the compound class
# "form-element form-button" (spaces are invalid in a class-name locator, so
# the original lookup could never succeed); use an equivalent CSS selector
# and keep the coordinate click as a fallback.
try:
    login_user_btn = driver.find_element(By.CSS_SELECTOR, '.form-element.form-button')
    login_user_btn.click()
    print("user pass登录按钮  By.CLASS_NAME成功")  # was mislabelled "失败" on the success path
except Exception:  # narrowed from a bare except:
    print("user pass登录按钮  By.CLASS_NAME失败")
    pyautogui.moveTo(230, 655)
    pyautogui.click()
    print("user pass登录按钮  鼠标成功")
    sleep(1)

# Scroll down so the consent page's "Accept" button becomes visible.
keyboard.press_and_release('pagedown')
print("下翻")
sleep(1)
# Consent page: <input type="submit" name="_eventId_proceed" value="Accept">
try:
    accept_btn = driver.find_element(By.XPATH, '/html/body/form/div/div[2]/p[2]/input[2]')
    accept_btn.click()
    print("accept按钮  By.XPATH成功")
except Exception:  # narrowed from a bare except:
    print("accept按钮  By.XPATH失败")
    pyautogui.moveTo(444, 694)
    pyautogui.click()
    # NOTE(review): the original clicked twice here — presumably the first
    # click only focuses the window; confirm before removing.
    pyautogui.click()
    print("accept按钮  鼠标成功")
sleep(5)
print("完成进入sci")

# Everything above only performs the login.
# NOTE(review): the triple-quoted string below is scraping code that has been
# disabled by wrapping it in a string literal (value is discarded at runtime).
# It opens the article page, parses title / journal info / authors / DOI /
# abstract / keywords / body sections / references with BeautifulSoup and
# writes them to a txt file.  Kept byte-identical; to re-enable, remove the
# surrounding ''' markers.
'''
# def get_paper(url):
# 正式打开文献页【覆盖原标签】
url="https://www.sciencedirect.com/science/article/pii/S1674200123001293"
print("开始爬"+url)
driver.get(url)#
html_text=driver.page_source#【必须在展开之后！！！！】
soup=BeautifulSoup(html_text, 'html.parser')
# 获取标题
try:
    vvv=soup.find('span',class_="title-text").text
    print("标题："+vvv)
    # 创建txt
    docfile='C:\\Users\\King\\Desktop\\sci\\'+vvv+'_zh.txt'
    f=open(docfile,"w",encoding="utf-8")#自动创建成功！！
except:
    print("出错,50s后退出")
    sleep(50)
    quit()
# 【title,abstr,paper】
# title
f.write("标题: "+vvv+'\n')#
# f.write("\n")
# 期刊信息
if soup.find("div",class_="publication-volume u-text-center") !=None:
    jourinfos=soup.find("div",class_="publication-volume u-text-center").text.strip()
    print("期刊信息："+jourinfos)
    f.write("期刊信息: "+jourinfos)
    f.write("\n")
else:
    f.write("期刊信息空")
    f.write("\n")

# author
if soup.find("div",class_="author-group")!=None:
    autinfos=soup.find("div",class_="author-group").text.strip()
    autinfos=autinfos.replace("Author links open overlay panel",'')#.text.strip()
    print("作者: "+autinfos)
    f.write("作者: "+autinfos)
    f.write("\n")
else:
    f.write("作者空")
    f.write("\n")
# DOI link
if soup.find('a',class_="anchor doi anchor-default") !=None:
    doi=soup.find('a',class_="anchor doi anchor-default").attrs['href']
    print("doi链接："+doi)
    f.write("doi链接："+doi)
    f.write("\n")
else:
    f.write("doi链接空")
    f.write("\n")
# abstr
infos=soup.find("div",class_="abstract author")
if infos !=None:
    if infos.find("p")!=None:
        abstract=infos.find("p").text.strip()
        print("摘要: "+abstract)
        f.write("摘要: "+abstract)
        f.write("\n")
    else:
        f.write("摘要空")
        f.write("\n")
else:
    f.write("摘要空")
    f.write("\n")
    
# keyword
# <span>Hollow mesoporous silica microspheres (HMSMs)</span>
if soup.find("div",class_="keywords-section")!=None:
    infos=soup.find("div",class_="keywords-section")
    if infos.find("span")!=None:
        keywords=infos.find_all("span")#.text.strip()
        keyw="关键词: "
        for i in keywords:
            keyw=keyw+i.text+";"
        print(keyw)
        f.write(keyw)
        f.write("\n")
    else:
        f.write("关键词空")
        f.write("\n")
else:
    f.write("关键词空")
    f.write("\n")

sleep(5)
# 正文
xxx=soup.find_all('div',class_="Body u-font-gulliver text-s")#逐渐往下级【不能少，必须all，for i in 】
print(xxx)
for xxx1 in xxx:
#   sec1=xxx1.find('section',id='sec2')
# intro
# sec1=xxx1.find('section',id='sec2')
# sec1txt=sec1.text
    print(xxx1)
    if xxx1.find("section",id="sec1") !=None:
        intro=xxx1.find("section",id="sec1")#.text#.strip()#【首尾去指定字符（空格、换行符）】
        ec1txt=intro.text
        print("引言：")#+intro)
        f.write("引言: "+intro)
        f.write("\n")
    else:
        print("正文出错")
        f.write("引言空")
        f.write("\n")

    # expri
    if xxx1.find("section",id="sec2") !=None:
        expri=xxx1.find("section",id="sec2").text#.strip()#【首尾去指定字符（空格、换行符）】
        print("实验：")#+expri)
        f.write("实验: "+intro)
        f.write("\n")
    else:
        f.write("实验空")
        f.write("\n")
        
    # disc
    if xxx1.find("section",id="sec3") !=None:
        disc=xxx1.find("section",id="sec3").text#.strip()#【首尾去指定字符（空格、换行符）】
        print("讨论：")#+disc)
        f.write("讨论: "+disc)
        f.write("\n")
    else:
        f.write("讨论空")
        f.write("\n")
        
    # conc
    if xxx1.find("section",id="sec4") !=None:
        conc=xxx1.find("section",id="sec4").text#.strip()#【首尾去指定字符（空格、换行符）】
        print("结论：")#+conc)
        f.write("结论: "+conc)
        f.write("\n")
    else:
        f.write("结论空")
        f.write("\n")

# 参考文献
sleep(3)
# refe
yyy=soup.find('div',class_="Body u-font-gulliver text-s")
if yyy.find("ol",class_="references") !=None:
    refe=yyy.find("ol",class_="references").text#.strip()#【首尾去指定字符（空格、换行符）】
    print("参考文献："+refe)
    f.write("参考文献: "+refe)
    f.write("\n")
else:
    f.write("参考文献空")
    f.write("\n")
# 右侧推荐文献：u-show-from-md col-lg-6 col-md-8 pad-right u-padding-s-top    
    
    
# xxx=soup.find_all('div',class_="Body u-font-gulliver text-s")#逐渐往下级
# sleep(5)
# try:
#     for xxx1 in xxx:
#         sec1=xxx1.find('section',id='sec2')
#         sec1txt=sec1.text
#         f.write(sec1txt)
#         print(sec1.text)#【上次成功，这次 'NoneType' object has no attribute 'text'】
#         print("获取文本成功")
#         f.close()
# except :
#     print("获取文本失败")
#     pass
sleep(5)
f.close()
# 获取页面源代码
# html_source = driver.page_source
# try:
#     with open('C:\\Users\\King\\Desktop\\保存源码.txt','a+',encoding="utf-8") as f:## a+ 模式会保留原内容，并在文件末尾添加内容
#           f.write(sec1txt)
#     print("保存txt成功")
# except :
#     print("保存txt失败")
'''
