# -*- coding: utf-8 -*-
from urllib import request
import urllib.parse as parse
from bs4 import BeautifulSoup
import sys
import baidusearch
# from selenium import webdriver
# from selenium.webdriver.chrome.options import Options
import excel
# # Options: run the browser headless (no visible window)
# chrome_options = Options()
# chrome_options.add_argument('--headless')

# Obtain the webdriver instance
# browser = webdriver.Chrome("/usr/local/bin/chromedriver",chrome_options=chrome_options)

import time
# argGroup = sys.argv[1:];
# # 根据python的参数来生成查询对象
# values = {"pn": "0", "tn": "SE_baiduxueshu_c1gjeupa", "id": "utf-8", "sc_hit": "1"}
# for arg in argGroup:
#     attribute = arg.split("=")[0];
#     value = arg.split("=")[1];
#     values[attribute] = value
# print(values)
# headers = {
#     "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
#     # "Accept-Encoding": "gzip, deflate, br",
#     "Accept-Language": "zh-CN,zh;q=0.9",
#     # "Connection": "keep-alive",
#     "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
#     # "X-Requested-With": "XMLHttpRequest"
#     "Host":"xueshu.baidu.com"
# }
# url = "https://xueshu.baidu.com"
# browser.get(url)
# time.sleep(10)
# browser.find_element_by_class_name('s_ipt').send_keys(values["wd"])
# time.sleep(2)
# browser.find_element_by_class_name('s_btn').click()
# time.sleep(2)
# content = browser.page_source.encode("utf-8")
# browser.close()
# soup = BeautifulSoup(content)
# print(soup.prettify())
# values = {"wd": "123", "pn": "0", "tn": "SE_baiduxueshu_c1gjeupa", "id": "utf-8", "sc_hit": "1"}
# data = parse.urlencode(values)
# req = request.Request(url, headers=headers)
# result = request.urlopen(req)
# the_page = result.read()
# gzip-compressed data: the received byte stream must be decoded; it starts with b'\x1f\x8b\x08'

# print(the_page)

# soup = BeautifulSoup(the_page)
# print(soup.prettify())
# print(soup.findAll(id='kw'))
# baidusearch.get_baidu_list(soup);

def run_xueshu_page(wd, pageName, browser):
    """Search Baidu Scholar (xueshu.baidu.com) for a keyword and export results.

    Loads the Baidu Scholar home page in the supplied selenium browser, types
    the keyword into the search box, submits the search, scrapes the result
    links from the rendered page, fetches each article's detail page, and
    writes everything to an Excel file.

    Args:
        wd: search keyword typed into the Baidu Scholar search box.
        pageName: page identifier forwarded to ``baidusearch.get_baidu_list``
            as its ``page`` keyword (presumably selects which result page to
            parse — confirm against baidusearch).
        browser: an already-started selenium WebDriver instance.

    Returns:
        The value returned by ``excel.create_excel`` on success, or ``None``
        when the search produced no result links.
    """
    # Load the Baidu Scholar home page and give it time to render.
    url = "https://xueshu.baidu.com"
    browser.get(url)
    time.sleep(10)
    # Type the keyword and submit the search form.
    # NOTE(review): find_element_by_class_name is removed in Selenium 4;
    # if the project upgrades, switch to browser.find_element(By.CLASS_NAME, ...).
    browser.find_element_by_class_name('s_ipt').send_keys(wd)
    time.sleep(2)
    browser.find_element_by_class_name('s_btn').click()
    time.sleep(2)
    content = browser.page_source.encode("utf-8")
    # Explicit parser avoids bs4's "no parser specified" warning and keeps
    # parsing behavior consistent across environments.
    soup = BeautifulSoup(content, "html.parser")
    # Collect the URL of every article in the result listing.
    url_list = baidusearch.get_baidu_list(soup, page=pageName)
    if not url_list:
        print("没有找到相关的文献")
        return None
    # Fetch the detail information for each article link.
    page_detail_list = [baidusearch.get_url_page_info(u) for u in url_list]
    # Export the collected details to an Excel workbook.
    return excel.create_excel(page_detail_list, wd)


