#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
from urllib.parse import quote_plus

import pymongo
import pymysql
import requests
from bs4 import BeautifulSoup


# Request headers shared by every fetch: a desktop-Chrome User-Agent so the
# target site serves pages to this script as if it were a browser.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
}  # masquerade as a browser


# 访问
# Fetch a page
def Request(url, timeout=30):
    """Fetch *url* with the browser User-Agent and return the response body.

    Args:
        url: Absolute URL to fetch.
        timeout: Seconds before the request is aborted (new, defaulted —
            the original call had no timeout and could hang indefinitely
            on a stalled server).

    Returns:
        The decoded response text.
    """
    res = requests.get(url, headers=headers, timeout=timeout)
    return res.text


# 清洗
# Parse
def Soup(res):
    """Parse an HTML document string into a BeautifulSoup tree (lxml parser)."""
    return BeautifulSoup(res, 'lxml')


# 持久化 Mysql
def Mysql(name, author,time,link,stance):
    try:
        conn = pymysql.connect("localhost", "root", "123456", "book")  # 连接数据库
        cursor = conn.cursor()
        sql = """
            insert into user(name,author,time,url,text) value(%s,%s,%s,%s,%s)
         """
        cursor.execute(sql, (name, author,time,link,stance))
        conn.commit()
        conn.close()
    except Exception as erro:
        print(erro)

# 持久化 Mongo
# Persist to MongoDB
def Mongo(id, name, author, time, link, stance):
    """Insert one scraped paper record into Mongo collection `libraiy`.

    Args:
        id: Sequential counter used as the document's ``_id``.
        name, author, time, link, stance: Fields of the scraped record.

    NOTE(review): collection name "libraiy" looks like a typo for "library",
    but it is a runtime identifier — renaming it would orphan existing data.
    """
    # Connect per call (inefficient but matches the original design);
    # close in finally so the client/socket is not leaked on error.
    myclient = pymongo.MongoClient("mongodb://localhost:27017/")
    try:
        mydb = myclient["result"]      # database
        mycol = mydb["libraiy"]        # collection
        mydict = {"_id": id, "name": name, "author": author,
                  "time": time, "link": link, "stance": stance}
        mycol.insert_one(mydict)       # insert
    finally:
        myclient.close()


def Task(url, pages):
    """Crawl `pages` search-result pages and persist every paper found.

    Args:
        url: Search URL complete except for the trailing page number.
        pages: Number of result pages to visit (1-based, inclusive).

    Returns:
        A completion message string.

    Per-item and per-page exceptions are printed and skipped so the crawl
    continues past broken entries.
    """
    count = 0
    for page in range(1, pages + 1):
        print("正在访问第" + str(page) + "页")
        try:
            # Link + title anchors of every paper on this result page.
            result = Soup(Request(url + str(page))).select(".title_li a:nth-child(3)")
            for item in result:
                try:
                    link = item['href']
                    # Fetch and parse each detail page ONCE and reuse the tree
                    # (the original re-fetched the same URL three times).
                    detail = Soup(Request(link))
                    res = detail.select(".abstract_dl dd")           # abstract blocks
                    author = detail.select_one("tr:first-child")     # author row
                    time = detail.select_one("#wfpublishdate td")    # publish date cell
                    # Clean whitespace and "[n]" citation markers.
                    author = re.sub(r'\s+', '', author.text).strip()
                    author = re.sub(r'\[\d\]', ' ', author).strip()
                    time = re.sub(r'\s+', '', time.text).strip()
                    name = item.text
                    stance = res[1].text.strip()
                    # Persist
                    # Mysql(name, author, time, link, stance)
                    count += 1
                    Mongo(count, name, author, time, link, stance)
                except Exception as erro:
                    print(erro)
                    continue
        except Exception as erro:
            print(erro)
            continue
    return "任务结束"

def main():
    """Prompt for a search term, discover the result page count, run the crawl."""
    inser = input("请输入搜索内容:")  # input() already returns str
    # URL-encode the user's query: the original concatenated it raw, so
    # spaces or '&'/'#' in the input corrupted the query string.
    url = "http://elib.ecnudec.com:8088/S/Paper.aspx?q=" + quote_plus(inser) + "+DBID%3aWF_QK&p="
    # The pager control's text contains the total page count.
    pages = Soup(Request(url)).select_one("#ctl00_ContentPlaceHolder1_ctl06_PagerControl1").text
    print("当前搜索页数:" + pages)
    pages = int(re.sub(r'\D', '', pages))  # keep only the digits
    result = Task(url, pages)
    print(result)

# Script entry point: run the interactive crawl only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
