import os
import time

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from tqdm import tqdm

def get_chapter(url):
    """Fetch a book's index page and list its chapters.

    Args:
        url: Absolute URL of the book's index page (e.g.
            "https://www.quanben.so/110_110518/"). Hrefs on the page are
            relative, so they are joined with this URL.

    Returns:
        A list of ``[chapter_url, chapter_name]`` pairs, in page order.
        Empty list if the chapter container is not found.

    Raises:
        requests.HTTPError: if the index page request fails.
    """
    header = {'User-Agent': UserAgent().random}  # random UA to reduce blocking
    response = requests.get(url=url, headers=header)
    response.raise_for_status()  # fail loudly instead of parsing an error page
    response.encoding = 'gbk'  # the site serves GBK-encoded Chinese text
    soup = BeautifulSoup(response.text, "lxml")
    index_div = soup.find("div", id="list")
    if index_div is None:
        # Layout changed or the request was blocked: no chapters to report.
        return []
    # Each <a> holds the chapter title as its string and a relative href;
    # prepend the index URL to form the absolute chapter URL.
    return [[url + a.get("href"), a.string] for a in index_div.find_all("a")]



def get_title(url):
    """Return the book name scraped from a page's <title> tag.

    The site's titles look like "BookName-..."; everything before the first
    '-' is taken as the book name.

    Args:
        url: Absolute URL of the page to scrape.

    Returns:
        The book name as a string.

    Raises:
        requests.HTTPError: if the request fails.
    """
    # Send the same randomized User-Agent the other fetchers use.
    header = {'User-Agent': UserAgent().random}
    response = requests.get(url, headers=header)
    response.raise_for_status()
    response.encoding = 'gbk'  # consistent with get_chapter/get_content; site serves GBK
    soup = BeautifulSoup(response.text, 'html.parser')
    # <title> text is "BookName-..."; keep only the part before the first '-'.
    return soup.title.string.split('-')[0]

def get_content(url, book_name):
    """Download one chapter page and save its text to ./text/chinese/<book_name>.txt.

    The chapter body lives in <div id="content">; paragraphs on this site are
    separated by four non-breaking spaces (U+00A0), which are used as the
    split delimiter. The output file is overwritten if it already exists.

    Args:
        url: Absolute URL of the chapter page.
        book_name: Basename (without extension) for the output text file.

    Raises:
        requests.HTTPError: if the chapter request fails.
    """
    header = {'User-Agent': UserAgent().random}
    response = requests.get(url=url, headers=header)
    response.raise_for_status()  # don't save an HTTP error page as chapter text
    response.encoding = 'gbk'  # the site serves GBK-encoded Chinese text
    soup = BeautifulSoup(response.text, "lxml")
    content_div = soup.find("div", id="content")
    if content_div is None:
        # Blocked or layout changed; nothing to save for this chapter.
        return
    paragraphs = content_div.text.split("\xa0" * 4)
    # Create the output directory up front so the first run doesn't crash.
    os.makedirs("./text/chinese", exist_ok=True)
    # Write UTF-8 explicitly: the platform default encoding (e.g. cp1252 on
    # Windows) may not be able to encode Chinese characters.
    with open(f"./text/chinese/{book_name}.txt", "w", encoding="utf-8") as file:
        for line in paragraphs:
            file.write(line + "\n")

if __name__ == '__main__':
    # Collect every chapter listed on the book's index page, then download
    # each one in order with a short pause between requests.
    chapter_list = get_chapter("https://www.quanben.so/110_110518/")
    for chapter_url, chapter_name in tqdm(chapter_list):
        print([chapter_url, chapter_name])
        get_content(chapter_url, chapter_name)
        time.sleep(0.1)  # throttle to be polite to the server

