# -*- coding: utf-8 -*-
import concurrent
import time
from datetime import datetime
from queue import Empty, Queue
from threading import Thread

import requests
from bs4 import BeautifulSoup



# 1-based index of the next chapter whose turn it is to be written to disk.
# Shared mutable state across all worker threads: each worker spins until this
# counter reaches its own chapter index, writes, then advances it (relies on
# the GIL for the increment -- no explicit lock).
save_chapter_num=1

def write_data(q):
    """Worker thread: pull (chapter_index, chapter_url) jobs off the queue,
    scrape each chapter and append it to the shared output file in order.

    Relies on module globals:
        headers          -- HTTP request headers (custom User-Agent)
        fw               -- the open output file handle
        save_chapter_num -- 1-based index of the next chapter to be written;
                            each worker waits for its turn, then advances it

    Args:
        q: queue.Queue of (chapter_index, chapter_url) tuples.
    """
    global save_chapter_num
    while True:
        try:
            # get_nowait() avoids the empty()/get() race of the naive
            # `while not q.empty(): q.get()` pattern, where a losing
            # thread would block forever on an exhausted queue.
            chapter_index, chapter_url = q.get_nowait()
        except Empty:
            break

        novel_content = None
        title_text = chapter_url  # fallback label for log messages
        try:
            # NOTE: headers must be passed by keyword -- the second
            # positional argument of requests.get is `params`, so the
            # original call never sent the custom User-Agent.
            html = requests.get(chapter_url, headers=headers, timeout=30).text
            # 使用BeautifulSoup将html页面放进一个筛子里，取出需要的信息
            soup = BeautifulSoup(html, 'html.parser')
            # Chapter title: <div class="bookname"><h1>...</h1>
            title = soup.find('div', class_="bookname").find('h1')
            # Chapter body: <div id="content">
            content = soup.find('div', id='content')
            if title and content:
                title_text = title.get_text()
                novel_content = title_text + '\n\n' + content.get_text() + '\n\n\n\n'
                print(title_text + ' 爬取成功！\n')
            else:
                print(title_text + ' 爬取失败！')
        except (requests.RequestException, AttributeError):
            # Narrow except: network errors, or a page whose expected
            # <div class="bookname"> structure is missing entirely.
            print(title_text + ' 爬取失败！')

        time.sleep(2)  # throttle so we don't hammer the site

        # Wait for our turn so chapters land in the file in order;
        # sleep instead of the original busy-spin `pass`, which pinned
        # one CPU core per waiting thread.
        while save_chapter_num < chapter_index + 1:
            time.sleep(0.01)

        if novel_content is not None:
            fw.write(novel_content)
            print(title_text + '下载成功！！！')
        # Always advance the counter, even when this chapter failed:
        # otherwise every thread waiting on a later chapter deadlocks.
        save_chapter_num += 1

if __name__ == "__main__":
    starttime = datetime.now()
    # Index page listing every chapter of the novel.
    url = 'https://www.bagebbb.com/2_2537/'
    headers = {"user-agent": "Mozilla/5.0(Windows NT 10.0;Win64;x64) AppleWebKit/537.36 (KHTML,like Gecko)Chrome/127.0.0.0Safari/537.36"}
    # Fetch the chapter index. headers must be a keyword argument --
    # passed positionally it becomes `params` and the User-Agent is lost.
    html = requests.get(url, headers=headers, timeout=30).text
    # 使用BeautifulSoup将html页面放进一个筛子里，取出需要的信息
    soup = BeautifulSoup(html, 'html.parser')
    # All chapter links sit inside <div id="list">.
    items = soup.find("div", id="list").find_all("a")
    # Build full URLs from the hrefs (assumes protocol-relative
    # '//host/...' hrefs -- TODO confirm against the live page).
    urls = ['https:' + item['href'] for item in items]

    # Work queue of (chapter_index, chapter_url) jobs consumed by write_data.
    q = Queue()
    for i, chapter_url in enumerate(urls):
        q.put((i, chapter_url))

    with open("E:/TXT/txt/我瞎编功法，你们怎么都成大帝了.txt", 'a', encoding='utf-8') as fw:
        # Fan out ten workers; the `with` block closes fw once they all
        # finish, so no explicit fw.close() is needed (the original's
        # close inside the `with` was redundant).
        threads = [Thread(target=write_data, args=[q]) for _ in range(10)]
        for t in threads:
            t.start()
        # Wait for every worker to drain the queue.
        for t in threads:
            t.join()

    endtime = datetime.now()
    print(f'本次下载共花费了{(endtime - starttime).seconds}秒')