# -*- coding: utf-8 -*-
#@Time : 2020/7/24 10:05
#@Author : Armor
#@File : 摘要爬取并发爬虫版.py
#@Software : PyCharm

import requests
import csv
from lxml import etree
import os
import random
from time import  sleep
import time
from threading import Semaphore
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, wait, ALL_COMPLETED

# Serializes access to the shared CSV writer across worker threads
# (default Semaphore() value is 1, i.e. a mutex).
my_semaphore = Semaphore()

# 测试删除标签
# ceshi = '<a href="/doc_detail?dbcode=CAPJ&amp;tablename=CAPJLAST&amp;filename=JSJY20200713005&amp;df=P9UYFJkNLl3SahnSJl2b5ZGTyI1UqBjY2Y0MWJGVtt2YzoXdSNnNH1URycVcoBTWjZ1UNF3RWxWRjZHWU9ycTVESzx0a5tWSuNWdEdURy9ERhJ1Zj5Ue1h3YoVFNZZVVNdkeRdjNEdGeIJWTBJHTZVkVBpWa0lnW" target="_blank">融合本体和改进禁忌搜索策略的气象灾害主题<font class="Mark">爬虫</font>方法</a>'
#
# # match = re.sub( "</?font[^><]*>",repl='',string=ceshi)
# # if match:
# #     print(match)
#
# text = "abc;xyz"
# match = re.split(r'(;)',text)
# if match:
#     print(match)
# x = match[0]
# print(x)

# 线程
def getHTMLResponse(url):
    """Fetch *url* and return the ``requests.Response``, or ``None`` on failure.

    A random 3-10 second delay follows each successful request to throttle
    the crawler (no framework-level rate limiting is available here).

    :param url: search-result page URL to download.
    :return: the response with encoding fixed up, or ``None`` if the
        request failed or returned a non-2xx status.
    """
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Mobile Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-User': '?1',
        'Sec-Fetch-Dest': 'document',
        'Referer': 'https://search.cn-ki.net/search?keyword=%E7%88%AC%E8%99%AB&db=CFLS&p=13',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'cookie': '_T_WM=7cd2fe00a46ec406f450f4ad4764df2b; SUB=_2A25yHhL1DeRhGeNL6FUZ8inIyD-IHXVR4L69rDV6PUNbktANLWXBkW1NSRtw-UgxtiIVZEtYWqy18AK3UycSktDK; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WW9ciTfJHi3v88u56ZUFhyf5JpX5KzhUgL.Fo-fe0MReoMXe0e2dJLoIpzLxKqL1h2LB.2LxKqLBK2L1K2t; SUHB=0sqBYxW86J6g_p; SSOLoginState=1595564709; ALF=1598156709',

    }

    try:
        r = requests.get(url, headers=headers, timeout=15)
        r.raise_for_status()
        # The server's declared charset is unreliable; guess from the body.
        r.encoding = r.apparent_encoding
    except requests.RequestException as e:
        # Narrowed from a bare `except:` that swallowed every error
        # (including KeyboardInterrupt) without saying which URL failed.
        print(f"获取html失败: {url} ({e})")
        return None
    # Throttle so requests are not fired back-to-back.
    sleep(random.randint(3, 10))
    return r

def getURLs(keyword, page_nums):
    """Build the search-result URLs for *keyword*, pages 1 through *page_nums*.

    :param keyword: search term (inserted verbatim into the URL).
    :param page_nums: number of result pages to crawl.
    :return: list of URLs in page order.

    Bug fix: the original iterated ``range(-1, page_nums + 1)``, producing
    two bogus pages (``p=-1`` and ``p=0``); the site's pagination is 1-based.
    """
    url_format = "https://search.cn-ki.net/search?keyword={}&p={}"
    return [url_format.format(keyword, page) for page in range(1, page_nums + 1)]

def getData(content, csv_file):
    """Parse one search-result page and write a CSV row per article.

    :param content: HTML of a search-result page (``str`` or ``bytes``).
    :param csv_file: an open ``csv.writer`` to append rows to.

    Each result block yields: title, author, publisher, publish time,
    type, abstract, download link.  A malformed block is now skipped
    instead of aborting the whole page — the original wrapped the entire
    loop in one bare ``except``, so the first bad block silently dropped
    every remaining result.
    """
    try:
        tree_node = etree.HTML(content)
        blocks = tree_node.xpath('//div[@class="mdui-col-xs-12 mdui-col-md-9 mdui-typo"]')
    except Exception as e:
        print(f"获取内容异常: {e}")
        return
    for block in blocks:
        try:
            # string(.) concatenates all descendant text, stripping markup
            # such as the <font class="Mark"> keyword highlight.
            title = block.xpath('.//h3/a')[0].xpath('string(.)').strip()
            print(title)
            author = block.xpath('.//div/span[1]//text()')[-1]
            publisher = block.xpath('.//div/span[3]//text()')[-1]
            publish_time = block.xpath('.//div/span[4]//text()')[-1].replace(' ', '')
            ptype = block.xpath('.//div/span[5]//text()')[-1]
            abstract = block.xpath('.//div[2]/p')[0].xpath('string(.)').strip()
            download_link = block.xpath('.//div[3]/a/@href')[-1]
        except (IndexError, etree.XPathError) as e:
            # An individual result with unexpected structure: log and move on.
            print(f"获取内容异常: {e}")
            continue
        csv_file.writerow([title, author, publisher, publish_time, ptype,
                           abstract, download_link])

def crawlData(url, csv_file):
    """Thread worker: fetch *url* and write its parsed rows to *csv_file*.

    The semaphore serializes access to the shared csv writer so rows from
    concurrent workers do not interleave; ``with`` guarantees release even
    if parsing raises.
    """
    response = getHTMLResponse(url)
    if response is None:
        # getHTMLResponse already logged the failure; the original code
        # crashed here with AttributeError on a failed fetch.
        return
    with my_semaphore:
        getData(response.content, csv_file)
    print(f"{url} is finished ！")

def main():
    """Crawl search-result pages for a keyword concurrently and save to CSV.

    Downloads pages with a 5-worker thread pool (``executor.map`` keeps
    the results in input order), then parses them sequentially because
    the csv writer is not thread-safe.
    """
    page_nums = 7
    # keyword = "爬虫"
    keyword = "推荐系统综述"
    FILE_PATH = '.' + os.sep + '25_crawl_abstract.csv'
    HEADERS = ["Title", "Author", "Publisher", "PublishTime", "Type", "Abstract", "Download_link"]
    urls = getURLs(keyword, page_nums)

    start = time.time()
    with open(file=FILE_PATH, mode="w", newline="", encoding="utf-8") as file:
        csv_file = csv.writer(file)
        csv_file.writerow(HEADERS)

        # Download phase: fan out over the thread pool.
        contents = []
        with ThreadPoolExecutor(max_workers=5) as executor:
            for url, response in zip(urls, executor.map(getHTMLResponse, urls)):
                if response is None:
                    # A failed fetch yields None; the original crashed on
                    # `.content` here.  Skip it and keep the rest.
                    continue
                contents.append(response.content)
                print(f"{url} is finished")

        # Parse phase: single-threaded writes to the csv file.
        for content in contents:
            getData(content, csv_file)

    print("finished end!")
    print("用时：", time.time() - start)

# Script entry point: only run the crawl when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
