# -*- coding: utf-8 -*-
#@Time : 2020/7/27 20:16
#@Author : Armor
#@File : idata论文摘要爬取.py
#@Software : PyCharm


import requests
import csv
from bs4 import BeautifulSoup
from lxml import etree
import os
import time

# 获取html页面
def getHTMLResponse(url):
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        'cookie': '_T_WM=7cd2fe00a46ec406f450f4ad4764df2b; SUB=_2A25yHhL1DeRhGeNL6FUZ8inIyD-IHXVR4L69rDV6PUNbktANLWXBkW1NSRtw-UgxtiIVZEtYWqy18AK3UycSktDK; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WW9ciTfJHi3v88u56ZUFhyf5JpX5KzhUgL.Fo-fe0MReoMXe0e2dJLoIpzLxKqL1h2LB.2LxKqLBK2L1K2t; SUHB=0sqBYxW86J6g_p; SSOLoginState=1595564709; ALF=1598156709',
    }
    try:
        r = requests.get(url,headers)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r
    except:
        print("获取html失败")

def getData(content, csv_file):
    """Parse one search-result page and append one CSV row per paper.

    Parameters
    ----------
    content : bytes or str
        Raw HTML of a search-result page.
    csv_file : csv.writer
        Open writer to receive rows of
        [title, author, publisher, publish_time, type, abstract, download_link].
    """
    tree_node = etree.HTML(content)
    if tree_node is None:
        print("获取内容异常")
        return
    blocks = tree_node.xpath('//div[@class="mdui-col-xs-12 mdui-col-md-9 mdui-typo"]')
    for block in blocks:
        # Per-result try: in the original a single malformed block aborted
        # every remaining result on the page (the try wrapped the loop).
        try:
            # Title: string(.) flattens nested markup to plain text.
            title = block.xpath('.//h3/a')[0].xpath('string(.)').strip()
            print(title)

            # Author (last text node of the first metadata span).
            author = block.xpath('.//div/span[1]//text()')[-1]
            # Publisher.
            publisher = block.xpath('.//div/span[3]//text()')[-1]
            # Publication date; strip embedded spaces.
            publish_time = block.xpath('.//div/span[4]//text()')[-1]
            publish_time = publish_time.replace(' ', '')
            # Document type (journal / thesis / ...).
            ptype = block.xpath('.//div/span[5]//text()')[-1]

            # Abstract paragraph, flattened to plain text.
            abstract = block.xpath('.//div[2]/p')[0].xpath('string(.)').strip()

            # Download URL.
            download_link = block.xpath('.//div[3]/a/@href')[-1]

            csv_file.writerow([title, author, publisher, publish_time,
                               ptype, abstract, download_link])
        except Exception:
            # Narrowed from bare `except:`; skip this result, keep the rest.
            print("获取内容异常")

def main():
    """Crawl `page_nums` pages of search results for `keyword` into a CSV."""
    page_nums = 10
    keyword = "爬虫"
    url = "https://search.cn-ki.net/search?keyword={}&p={}"
    FILE_PATH = '.' + os.sep + '50_crawl_abstract.csv'
    HEADERS = ["Title", "Author", "Publisher", "PublishTime", "Type", "Abstract", "Download_link"]

    start_time = time.time()
    # newline="" is required so csv.writer does not emit blank lines on Windows.
    with open(file=FILE_PATH, mode="w", newline="", encoding="utf-8") as file:
        csv_file = csv.writer(file)
        # Header row first.
        csv_file.writerow(HEADERS)
        # BUG FIX: the original used range(-1, page_nums+1), requesting the
        # invalid pages -1 and 0; pages are 1-based.
        for page_num in range(1, page_nums + 1):
            start_url = url.format(keyword, page_num)
            print(start_url)
            response = getHTMLResponse(start_url)
            # getHTMLResponse returns None on failure; the original crashed
            # with AttributeError on response.content in that case.
            if response is None:
                print("第{}页 failed".format(page_num))
                continue
            getData(response.content, csv_file)
            print("第{}页 finished".format(page_num))
    print("finished end!")
    print("用时：", time.time() - start_time)

if __name__ == "__main__":
    main()

