import urllib.parse

import requests
from bs4 import BeautifulSoup
from urllib import parse
import json
import time
import excel

# Collect the set of article detail-page links from the search-result pages
def get_baidu_list(soup, page):
    """Harvest article detail-page URLs from Baidu Scholar result pages.

    Args:
        soup: BeautifulSoup of the first search-result page; its
            ``#page > a`` anchors supply the pagination URL template.
        page: number of result pages to crawl (coerced with ``int``).

    Returns:
        list[str]: hrefs of every ``h3.c_font`` article anchor found,
        or an empty list when the page has no pagination block.
    """
    page = int(page)
    pager_links = soup.select("#page > a")
    if not pager_links:
        return []
    # The first pager anchor points at page 2 (pn=10); rewriting its pn
    # parameter steps through pages 1..page as pn = 0, 10, 20, ...
    fir_page = "http://xueshu.baidu.com" + pager_links[0]["href"]
    urls_list = []
    for num in range(page):
        next_page = fir_page.replace("pn=10", "pn={:d}".format(num * 10))
        print(next_page)
        content = requests.get(next_page)
        # Name the parser explicitly so bs4 neither warns nor varies
        # behavior with whichever parser happens to be installed.
        new_soup = BeautifulSoup(content.text, "html.parser")
        for ele in new_soup.find_all("h3", class_="c_font"):
            urls_list.append(ele.select('a')[0]["href"])
    return urls_list
# Fetch the detail page of a linked article (BeautifulSoup implementation)
def get_url_page_info(url):
    """Scrape one Baidu Scholar detail page and return its fields as a dict.

    Args:
        url: detail-page URL; must carry a ``paperid`` query parameter,
            which is also used to fetch the GB/T 7714 citation from the
            ``/u/citation`` JSON endpoint.

    Returns:
        dict with keys ``GB/T``, ``title``, ``author``, ``inventor``,
        ``abstract``, ``announcement_no``, ``announcement_data``,
        ``article_type``, ``year``, ``key_word``, ``appid``,
        ``qutoCount`` and ``href``. Fields missing from the page default
        to ``""`` (``"0"`` for ``qutoCount``).
    """
    def trim(text):
        # Strip newlines, carriage returns and spaces.
        return text.replace('\n', '').replace('\r', '').replace(' ', '')

    def get_label_str_step(label, tag="p"):
        # CSS selector for <tag> elements whose data-click attribute
        # contains the token {'button_tp':'<label>'}.
        return tag + "[data-click~=\"{'button_tp':'" + label + "'}\"]"

    def get_a_label_str_step(label):
        # Same selector, but for <a> elements.
        return get_label_str_step(label, tag="a")

    def first_text(selector, default=""):
        # Text of the first element matching selector, or default ("")
        # so every scraped field is consistently a string.
        found = info_soup.select(selector)
        return found[0].get_text() if found else default

    content = requests.get(url)
    # Explicit parser: avoids bs4's "no parser specified" warning and
    # install-dependent parser choice.
    info_soup = BeautifulSoup(content.text, "html.parser")

    # Title: first child of the main-info heading, whitespace stripped.
    title_nodes = info_soup.select('.main-info > h3 > *')
    title = trim(title_nodes[0].get_text()) if title_nodes else ""

    # Authors and inventors, comma-joined.
    author = ','.join(a.get_text() for a in info_soup.select(
        '.c_content > .author_wr > p.author_text > span > a'))
    inventor = ','.join(a.get_text() for a in info_soup.select(
        '.c_content > .author_wr > p.kw_main_l > span > a'))

    abstract = first_text('.c_content > .abstract_wr > p.abstract')

    # Patent metadata fields, all "" when absent from the page.
    announcement_no = first_text(get_label_str_step("pubid"))      # publication number
    announcement_data = first_text(get_label_str_step("paper_published_time"))
    article_type = first_text(get_label_str_step("patent_type"))
    year = trim(first_text(get_label_str_step("year")))
    key_word = " ".join(a.get_text() for a in info_soup.select(
        get_label_str_step("keyword") + " span > a"))
    appid = first_text(get_label_str_step("appid"))                # application (patent) number
    # Citation count; "0" when the page shows none.
    qutoCount = first_text(get_a_label_str_step("sc_cited"), default="0")

    # GB/T 7714 citation: fetched from the citation JSON endpoint keyed
    # by the paperid query parameter of the detail URL.
    url_params = parse.parse_qs(parse.urlparse(url).query)
    paperid = url_params["paperid"][0]
    params = {
        "paperid": paperid,
        "type": "cite"
    }
    quto_page = requests.get(
        "https://xueshu.baidu.com/u/citation?" + parse.urlencode(params))
    # requests decodes the JSON payload for us.
    quto_page_dict = quto_page.json()
    # The payload also carries other styles (sc_APA, sc_MLA); we only
    # keep GB/T 7714. Missing "data" no longer raises KeyError.
    data = quto_page_dict.get("data", {})
    gbt = data["sc_GBT7714"] if "sc_GBT7714" in data else ""

    return {
        "GB/T": gbt,
        "title": title,
        "author": author,
        "inventor": inventor,
        "abstract": abstract,
        "announcement_no": announcement_no,
        "announcement_data": announcement_data,
        "article_type": article_type,
        "year": year,
        "key_word": key_word,
        "appid": appid,
        "qutoCount": qutoCount,
        "href": url
    }
# time.sleep(2);
