# -*- coding: utf-8 -*-
import re

import requests
from bs4 import BeautifulSoup
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor

def down_txt(url):
    """Download a single chapter page from *url* and save it to E:/TXT/.

    The chapter title (from <h1 class="wap_none">) becomes the file name
    and the chapter body (from <div id="chaptercontent">) becomes the
    file content. Relies on the module-level ``headers`` dict.
    If the page does not match the expected layout, the chapter is
    skipped instead of crashing.
    """
    # headers must be a keyword argument: the second positional
    # parameter of requests.get() is `params`, not `headers`.
    html = requests.get(url, headers=headers).text
    # Parse the chapter page with BeautifulSoup.
    soup = BeautifulSoup(html, 'html.parser')
    # Chapter title tag.
    title = soup.find('h1', class_="wap_none")
    # Chapter body tag.
    content = soup.find('div', id='chaptercontent')
    # Bail out early on an unexpected page layout — otherwise we would
    # write a "None.txt" file and crash on f.write(None).
    if not (title and content):
        print(f'页面解析失败，跳过: {url}')
        return
    title = title.get_text()
    content = content.get_text()
    # Strip characters that are illegal in Windows file names so the
    # open() below cannot fail on titles like 第1章:"开端"?
    safe_title = re.sub(r'[\\/:*?"<>|]', '', title).strip()
    # Explicit UTF-8 so Chinese text is written correctly regardless of
    # the platform's default locale encoding.
    with open(f"E:/TXT/{safe_title}.txt", 'w', encoding='utf-8') as f:
        f.write(content)
        print(title + '下载完成..........')

if __name__ == "__main__":
    starttime=datetime.now()
    url='https://www.fq44.cc/index/6255/'
    headers={"user-agent":"Mozilla/5.0(Windows NT 10.0;Win64;x64) AppleWebKit/537.36 (KHTML,like Gecko)Chrome/127.0.0.0Safari/537.36"}
    #调接口拿到网页
    html=requests.get(url,headers).text
    # 使用BeautifulSoup将html页面放进一个筛子里，取出需要的信息
    soup=BeautifulSoup(html,'html.parser')
    #从筛子中取出小说的url信息
    items=soup.find("div",class_="listmain").find_all("a")
    #定义一个空列表，进行接收全部的url
    urls=[]
    #遍历url，拼接成完整可直接访问的url
    for item in items:
        url=item['href']
        if url!='javascript:dd_show()':
            url='https://www.fq44.cc'+url
            urls.append(url)
    #遍历完整的url，调用下载方法，进行下载
    # for url in urls:
    #     down_txt(url)

    with ThreadPoolExecutor(max_workers=40) as exe:
        for url in urls:
            exe.submit(down_txt,url)
    endtime=datetime.now()
    print(f'本次下载共花费了{(endtime-starttime).seconds}秒')







