# -*- coding: utf-8 -*-
#@Time : 2020/7/31 11:19
#@Author : Armor
#@File : SCI.py
#@Software : PyCharm

import csv
import os
import random
import time
from concurrent.futures import ThreadPoolExecutor
from threading import Semaphore
from time import sleep

import requests
from bs4 import BeautifulSoup
from lxml import etree
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

def slnGetHTMLResponse(url):
    """Fetch a JS-rendered page with headless Chrome and return its outer HTML.

    Args:
        url: The page address to load.

    Returns:
        The ``outerHTML`` of the document's root element as a string.
    """
    chrome_options = Options()
    chrome_options.add_argument('--disable-gpu')  # Google docs suggest this to work around a bug
    chrome_options.add_argument('--hide-scrollbars')  # hide scrollbars for odd pages
    chrome_options.add_argument('blink-settings=imagesEnabled=false')  # skip images for speed
    chrome_options.add_argument('--headless')  # run without a visible window
    driver = webdriver.Chrome(options=chrome_options)
    try:
        driver.implicitly_wait(10)
        driver.get(url)
        time.sleep(15)  # wait for JS-rendered content; TODO: prefer an explicit wait condition
        # find_element_by_xpath was removed in Selenium 4; use the By locator API.
        return driver.find_element(By.XPATH, "//*").get_attribute("outerHTML")
    finally:
        # Always shut the browser down, even on error, so Chrome processes don't leak.
        driver.quit()

def getHTMLResponse(url):
    """GET *url* with browser-like headers.

    Args:
        url: Address to request.

    Returns:
        The ``requests.Response`` on success, or ``None`` when the request fails.
    """
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Mobile Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-User': '?1',
        'Sec-Fetch-Dest': 'document',
        'Referer': 'https://search.cn-ki.net/search?keyword=%E7%88%AC%E8%99%AB&db=CFLS&p=13',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'cookie': '_T_WM=7cd2fe00a46ec406f450f4ad4764df2b; SUB=_2A25yHhL1DeRhGeNL6FUZ8inIyD-IHXVR4L69rDV6PUNbktANLWXBkW1NSRtw-UgxtiIVZEtYWqy18AK3UycSktDK; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WW9ciTfJHi3v88u56ZUFhyf5JpX5KzhUgL.Fo-fe0MReoMXe0e2dJLoIpzLxKqL1h2LB.2LxKqLBK2L1K2t; SUHB=0sqBYxW86J6g_p; SSOLoginState=1595564709; ALF=1598156709',
    }

    try:
        # NOTE(review): timeout=3600 is effectively unbounded — confirm whether a
        # shorter timeout (getPageNum uses 15s) is intended.
        r = requests.get(url, headers=headers, timeout=3600)
        print(r.content.decode())  # debug output of the raw page body
        return r
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        print("获取html失败")
        return None

def getURLs(keyword, page_nums, url_format):
    """Build the search-result URLs for pages 1 through *page_nums*.

    Args:
        keyword: Search term inserted into the first format slot.
        page_nums: Number of result pages to generate.
        url_format: Format string with two ``{}`` slots: keyword, page number.

    Returns:
        A list of formatted URLs, one per page.
    """
    # Bug fixes: the `url_format` parameter was previously overwritten by a
    # hard-coded constant, and the range started at -1, producing the invalid
    # page numbers -1 and 0. Pages now run 1..page_nums inclusive.
    return [url_format.format(keyword, page) for page in range(1, page_nums + 1)]

def getData(content, csv_file):
    """Parse one search-result page and append one CSV row per result item.

    Args:
        content: Raw HTML of a search-result page.
        csv_file: A ``csv.writer``-like object; one row is written per result.
    """
    try:
        tree_node = etree.HTML(content)
        # One <li> per search result under the reversed primary list.
        blocks = tree_node.xpath('//div[@class="ss-primary ss-primary--reversed"]/ul/li')
        # Cap at 100 items per page (mirrors the original zip(range(0, 100)) limit).
        for i, block in enumerate(blocks[:100]):
            print(f"第 {i + 1} ：")
            # Title text of the result link.
            title = block.xpath('./div/div/h2/a')[0].xpath('string(.)').strip()
            print(title)

            # Author and publication date share a single text node.
            author_time = block.xpath('./div/div/p/text()')[0]
            print(author_time)

            # Abstract text.
            ab_info = block.xpath('./div/div/div/p')[0].xpath('string(.)').strip()
            print(ab_info)

            # DOI is optional; fall back to the string "None" to keep columns aligned.
            doi = block.xpath('./div/div/ul/li[1]/text()')
            doi = doi[0] if doi else str(None)
            print(doi)

            # Publisher is optional as well.
            pulish = block.xpath('./div/div/ul/li[2]/text()')
            pulish = pulish[0] if pulish else str(None)
            print(pulish)

            # Category / classification heading.
            classes = block.xpath('./div/div/h3/text()')[0]
            print(classes)

            # Detail-page link for the result.
            info_link = block.xpath('./div/div/h2/a/@href')[0]
            print(info_link)

            # PDF scraping is disabled; the placeholder keeps the column present.
            pdf_link = str(None)
            print(pdf_link)

            print("------------------------------------------------------------------------")

            csv_file.writerow([title, author_time, ab_info, doi, pulish, classes, info_link, pdf_link])
    except Exception:
        # Narrowed from a bare `except:`; a parse failure on one page is reported
        # without aborting the whole crawl (and no longer swallows SystemExit).
        print("获取内容异常")

def getPageNum(keyword, url_format):
    """Fetch the first result page to discover how many pages exist.

    NOTE(review): the page-number extraction strategy is not implemented yet —
    on success this always returns an empty list; on failure it returns None.

    Args:
        keyword: Search term inserted into *url_format*.
        url_format: Format string with two ``{}`` slots: keyword, page number.

    Returns:
        An (empty, for now) list of page info, or ``None`` on request failure.
    """
    url = url_format.format(keyword, 1)
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Mobile Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-User': '?1',
        'Sec-Fetch-Dest': 'document',
        'Referer': 'https://search.cn-ki.net/search?keyword=%E7%88%AC%E8%99%AB&db=CFLS&p=13',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'cookie': '_T_WM=7cd2fe00a46ec406f450f4ad4764df2b; SUB=_2A25yHhL1DeRhGeNL6FUZ8inIyD-IHXVR4L69rDV6PUNbktANLWXBkW1NSRtw-UgxtiIVZEtYWqy18AK3UycSktDK; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WW9ciTfJHi3v88u56ZUFhyf5JpX5KzhUgL.Fo-fe0MReoMXe0e2dJLoIpzLxKqL1h2LB.2LxKqLBK2L1K2t; SUHB=0sqBYxW86J6g_p; SSOLoginState=1595564709; ALF=1598156709',
    }
    try:
        r = requests.get(url, headers=headers, timeout=15)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        # TODO: parse the pagination widget out of r.text and populate page_info.
        page_info = []
        return page_info
    except requests.RequestException:
        # Narrowed from a bare `except:` so control-flow exceptions propagate.
        print("获取页码失败")
        return None

def run_savefile(keyword, url_format, FILE_PATH, HEADERS):
    """Crawl search pages for *keyword* and save the parsed rows to a CSV file.

    Args:
        keyword: Search term.
        url_format: Format string with two ``{}`` slots: keyword, page size/number.
        FILE_PATH: Destination CSV path.
        HEADERS: CSV header row written before the data rows.
    """
    start = time.time()  # fixed typo: was `strat`
    # 1. Page count is hard-coded for now; getPageNum is a stub (TODO: re-enable).
    page_num = 100
    # 2. Build the URL list — currently a single URL with page_num in the 2nd slot.
    urls = [url_format.format(keyword, page_num)]
    # 3. Fetch all pages first, then parse and persist them.
    with open(file=FILE_PATH, mode="w", newline="", encoding="utf-8") as file:
        csv_file = csv.writer(file)
        csv_file.writerow(HEADERS)
        contents = []
        # max_workers=1 keeps fetches sequential; slnGetHTMLResponse drives a full browser.
        with ThreadPoolExecutor(max_workers=1) as executor:
            for url, html in zip(urls, executor.map(slnGetHTMLResponse, urls)):
                contents.append(html)
                print(f"{url} is finished")

        # Iterate the pages directly instead of via range(len(...)).
        for content in contents:
            getData(content, csv_file)
    print("finished end!")
    print("用时：", time.time() - start)

def run_sci(keyword):
    """Entry point: crawl ScienceMag search results for *keyword* into a CSV file."""
    # CSV column headers, one per field extracted from each result.
    header_row = ["title", 'author_time', 'ab_info', 'DOI', 'pulish', 'classes', 'info_link', 'pdf_link']
    # Output lands next to the script, named after the keyword.
    output_path = '.' + os.sep + keyword + '.csv'
    # URL template with slots for the search term and the page size.
    search_url_template = 'https://search.sciencemag.org/?searchTerm={}&order=newest&limit=textFields&pageSize={}&&'
    run_savefile(keyword, search_url_template, output_path, header_row)

if __name__ == '__main__':
    # Script entry point: crawl results for the fixed keyword "machine learning".
    run_sci('machine learning')