import requests
import time
import os
import lxml.html
import redis
from pymongo import MongoClient
# Shared datastore clients used by the rest of the script.
# NOTE(review): hosts and credentials are hard-coded in plain text here —
# consider loading them from environment variables or a config file.
# Redis (db 1) holds the crawl queue of chapter URLs.
redisClient = redis.StrictRedis(host='www.yueshushu.top',password='yjl250',
                                db=1,port=6379)
# MongoDB stores the scraped articles.
mongoClient = MongoClient ('mongodb://stock88:123456@www.yueshushu.top:27017')
def get_all_hrefList(startUrl, content):
    """
    Extract every chapter link from the index page and push the absolute
    URL onto the Redis crawl queue 'dev_queue'.

    :param startUrl: base URL prefixed to each relative chapter file name
    :param content: decoded HTML of the index page
    :return: None (side effect: lpush each qualifying URL to Redis)
    """
    print(f'开启 {startUrl}')
    selector = lxml.html.fromstring(content)
    # The table body that holds all the chapter <a> links.
    toc_url_path_list = selector.xpath("//*[@id='top']/div[3]/table/tbody")
    for url in toc_url_path_list:
        # NOTE(review): this XPath is absolute ('//...'), so it searches the
        # whole document rather than the context node url[0], and it takes
        # the link *text* rather than @href — presumably the visible text
        # equals the target file name on this site; confirm before changing.
        urlHrefList = url[0].xpath('//tr/td/a/text()')
        for url2 in urlHrefList:
            # Only enqueue entries that look like chapter pages.
            if url2.endswith('.html'):
                print('添加url:' + (startUrl + url2))
                redisClient.lpush('dev_queue', startUrl + url2)
            else:
                print('不爬虫该页面')

# Parse one chapter page: the <title> text becomes the article name, and
# the raw HTML (with <br/> tags removed) is kept as the article body.
def get_article(content):
    """Return (page_title, html_without_br) for a decoded chapter page."""
    dom = lxml.html.fromstring(content)
    title = dom.xpath("//html/head/title/text()")[0]
    body = content.replace('<br/>', '')
    return title, body

# Persistence helper.
def save(collection, text_name, text_content):
    """Insert one {'title', 'content'} document into the given Mongo collection."""
    doc = {'title': text_name, 'content': text_content}
    collection.insert_one(doc)
# --- main script: crawl the index, queue chapters, scrape and persist ---
startTime = time.time()
print('开始爬虫')

# Fetch the index page and enqueue every chapter URL in Redis.
startHtmlResponse = requests.get("https://yueshushu.top/dev/")
startHtmlContent = startHtmlResponse.content.decode()
get_all_hrefList('https://yueshushu.top/dev/', startHtmlContent)

# lrange returns raw bytes (the client is built without decode_responses),
# so decode each URL once here — otherwise the f-string below prints
# b'...' reprs and requests receives bytes instead of str.
urlList = [u.decode() if isinstance(u, bytes) else u
           for u in redisClient.lrange('dev_queue', 0, -1)]
print(f'要获取的页面:{urlList}')

# The collection lookup is loop-invariant — hoist it out of the loop.
collection = mongoClient['stock']['python2']
for url in urlList:
    # Fetch each chapter page and persist its title + body.
    tempResponse = requests.get(url)
    text_name, text_content = get_article(tempResponse.content.decode())
    save(collection, text_name, text_content)
    print(f'爬虫 {url} 并保存成功')
print('爬虫成功')
endTime = time.time()
print(f'共用时: {endTime - startTime}')
# 16.042192220687866