# -*- coding: utf-8 -*-
import json
import re
from time import sleep, time
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from config import url,university,username,password,keyword,countrys
import requests
from lxml import etree
from retrying import retry
import openpyxl

# Resolve paths relative to this file so the script works from any CWD.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
datas_file = os.path.join(BASE_DIR, "datas_re.xlsx")
# Shared workbook/worksheet that parse_data() writes scraped rows into.
# The xlsx file must already exist and contain a sheet named 'wos'.
workbook = openpyxl.load_workbook(datas_file)
sheet = workbook['wos']

# Selenium explicit-wait timeout in seconds.
wait_time = 60
wait_time0 = 3  # NOTE(review): appears unused in this file — confirm before removing
# Browser-like headers for the `requests` fetches of the detail pages.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
}


@retry(stop_max_attempt_number=5)
def requests_url(detail_url):
    """Fetch *detail_url* and return it parsed as an lxml HTML tree.

    The @retry decorator re-invokes this function up to 5 times; on a
    non-200 status we pause 5s and raise so the retry kicks in.

    :param detail_url: absolute URL of a WoS full-record page.
    :return: lxml element tree of the decoded response body.
    :raises Exception: when the response status is not 200 (after 5 tries
        the last exception propagates to the caller).
    """
    resp = requests.get(detail_url, headers=headers, timeout=30)
    if resp.status_code != 200:
        sleep(5)  # back off before the decorator retries
        # Informative message instead of a bare `raise Exception`
        raise Exception(
            "request failed with status {} for {}".format(resp.status_code, detail_url))
    # assumes the page body is UTF-8 decodable — TODO confirm against server charset
    return etree.HTML(resp.content.decode())


def parse_data(html, i):
    """Extract one record's fields from a WoS detail page and write them
    into row ``i + 1`` of the shared worksheet ``sheet``.

    :param html: lxml element tree of the full-record page.
    :param i: 1-based record index (written to worksheet row ``i + 1``).
    :return: True when every extracted field is empty (the caller treats
        this as "no more valid records, stop"); otherwise None.
    """
    title = "".join(html.xpath('//div[@class="title"]//text()'))
    title = title.replace("\n", "").replace("        ", "")
    # Author labels look like "Name (affiliation)"; keep at most the first 3.
    label_list = html.xpath("//span[text()='作者:']/..//text()")
    label = [la for la in label_list if re.search(r" \(", la)][:3]
    label_str = "".join(label)
    year = html.xpath('//span[contains(text(),"出版年")]/..//text()')
    year = year[2] if len(year) > 2 else ""
    doc_type = html.xpath('//*[text()="文献类型:"]/../text()')  # renamed: `type` shadowed the builtin
    doc_type = doc_type[1] if len(doc_type) > 1 else ""
    fr_label = html.xpath('//span[contains(text(),"通讯作者地址:")]/..//text()')
    fr_label = fr_label[2].split(" (")[0].strip() if len(fr_label) > 2 else ""
    if fr_label in label_list:
        idx = label_list.index(fr_label) + 1
        # Guard: the original indexed past the end when fr_label was the
        # last entry, raising IndexError.
        if idx < len(label_list):
            fr_label = label_list[idx]
    # Reprint (corresponding-author) address, with optional trailing span.
    fr_address1 = html.xpath('//table[@class="FR_table_noborders"][1]//tr/td[2]/text()')
    fr_address2 = html.xpath('//table[@class="FR_table_noborders"][1]//tr/td[2]/span[1]/text()')
    fr_address1 = fr_address1[0] if fr_address1 else ""
    fr_address = fr_address1 + fr_address2[0] + "." if fr_address2 else fr_address1
    if not fr_address:
        # Fallback location used on the alternate page layout.
        fr_address = html.xpath('//*[text()="作者信息"]/../p[1]/span[2]/text()')
        fr_address = fr_address[0] if fr_address else ""
    fr_address_1_1 = html.xpath('//table[@class="FR_table_noborders"][2]//tr[1]//a/text()')
    fr_address_1_2 = html.xpath('//table[@class="FR_table_noborders"][2]//tr[1]//a/span/text()')
    fr_address_1_1 = fr_address_1_1[0] if fr_address_1_1 else ""
    fr_address_1 = fr_address_1_1 + fr_address_1_2[0] + "." if fr_address_1_2 else fr_address_1_1
    large_number = html.xpath('//div[@class="flex-row flex-justify-start flex-align-start box-div"]//span[@class="large-number"]/text()')
    large_number = large_number[0] if large_number else ""
    publisher = html.xpath('//div[text()="出版商"]/../p/value/text()')
    publisher = publisher[0] if publisher else ""
    # Funding agencies: one table row each; the name lives in td[1]/p or td[1].
    fund_links = html.xpath('//th[text()="基金资助机构"]/../../tr[@class="fr_data_row"]')
    fund_link_list = []
    for tr in fund_links:
        td = tr.xpath('./td[1]/p/text()') or tr.xpath('./td[1]/text()')
        if td:
            fund_link_list.append(td[0])
    fund_link = "+".join(fund_link_list)
    PubMed_ID = html.xpath('//*[text()="PubMed ID:"]/../value/text()')
    PubMed_ID = PubMed_ID[0] if PubMed_ID else ""
    # Column order matters: it defines the worksheet layout.
    datas_list = [title, label_str, year, doc_type, fr_label, fr_address,
                  fr_address_1, large_number, PubMed_ID, fund_link, publisher]
    print(i, datas_list)
    if datas_list.count("") == len(datas_list):
        # Every field empty: signal the caller that the record is invalid.
        return True
    for col, value in enumerate(datas_list, start=1):
        sheet.cell(row=i + 1, column=col, value=value)
    # Save once per row — the original saved the whole workbook inside the
    # column loop, rewriting the file 11 times per record.
    workbook.save(datas_file)


def run():
    """Log in to Web of Science through institutional (CERNET) SSO with
    Selenium, run an advanced search refined by publication year and
    country, then fetch every result's detail page with `requests` and
    write it to the worksheet via parse_data().

    Reads url/university/username/password/keyword/countrys from config.
    """
    options = webdriver.ChromeOptions()
    options.add_argument('--no-sandbox')
    dr = webdriver.Chrome(options=options)

    def wait_for(xpath):
        # Presence (not clickability) — mirrors the original lambda waits;
        # also replaces the Selenium-3-only find_element_by_xpath API.
        return WebDriverWait(dr, wait_time).until(
            EC.presence_of_element_located((By.XPATH, xpath)))

    def wait_click(xpath):
        WebDriverWait(dr, wait_time).until(
            EC.element_to_be_clickable((By.XPATH, xpath))).click()

    try:
        dr.maximize_window()
        dr.get(url)
        # Choose institutional (Shibboleth) login
        Select(wait_for('//select[@id="shibSelect"]')).select_by_visible_text("CHINA CERNET Federation")
        wait_for('//button[@id="shibSubmit"]').click()
        # Enter the university name
        wait_for('//input[@id="idpSelectInput"]').send_keys(university)
        wait_for('//input[@id="idpSelectSelectButton"]').click()
        # Sign in
        wait_for('//input[@id="username"]').send_keys(username)
        wait_for('//input[@id="password"]').send_keys(password)
        wait_for('//button[@class="form-element form-button"]').click()
        sleep(2)
        wait_click('//input[@value="同意"]')  # consent button
        sleep(3)
        # Open advanced search
        wait_click('//div[@class="searchtype-sub-nav"]//a[text()="高级检索"]')
        # Enter the search keyword and search
        wait_for('//div[@class="AdvSearchBox"]/textarea[@id="value(input1)"]').send_keys(keyword)
        wait_click('//div[@class="AdvSearchBox"]//button[@id="search-button"]')
        # Open the result set
        wait_click('//div[@class="block-history"]//div[@id="set_1_div"]/a[@id="hitCount"]')
        # Refine by publication year
        wait_click('//a[@id="PublicationYear"]')
        wait_click('//input[@value="PublicationYear_2014"]')
        wait_click('//td[@class="ra_button_row"]//button[@title="精炼"]')
        # Refine by country/territory
        wait_click('//div[@class="moreRAOptsLink"]/a')
        wait_click('//div[@id="CountryTerritory"]/h4/a')
        wait_click('//div[@id="CountryTerritory"]//div[@id="CountryTerritory_tr"]//a[@title="更多精炼检索结果"]')
        for country in countrys:
            wait_click('//input[@value="CountryTerritory_{}"]'.format(country.upper()))
        wait_click('//td[@class="ra_button_row"]//button[@title="精炼"]')
        sleep(5)
        # Scrape each record via requests (faster than driving the browser)
        total_num = int(wait_for('//*[@id="hitCount.top"]').text.replace(",", ""))
        detail_href = wait_for(
            '//div[@id="RECORD_1"]//a[@class="smallV110 snowplow-full-record"]').get_attribute("href")
        for i in range(1, total_num + 1):
            # 10 records per results page, so record 10 is still on page 1.
            # The original `i // 10 + 1` put every 10th record on the wrong
            # page — presumably 10/page; verify against the live site.
            page = (i - 1) // 10 + 1
            detail_url = detail_href.replace(
                "page=1&doc=1", "page={}&doc={}".format(page, i)) + "&locale=zh_CN"
            try:
                html = requests_url(detail_url)
            except Exception as e:
                print(e)
                break
            if parse_data(html, i):
                break
    finally:
        # Always release the browser, even when a wait times out mid-run.
        dr.quit()


if __name__ == '__main__':
    # Time the whole scrape run and report the elapsed seconds.
    started = time()
    run()
    elapsed = time() - started
    print("delta耗时:{}S".format(elapsed))