'''
Author: fg
Date: 2023-01-29 17:53:06
LastEditors: fg
LastEditTime: 2023-01-29 17:53:19
'''
import requests
from bs4 import BeautifulSoup

def geturl(url="https://www.xqb5200.com/95_95204/"):
    """Fetch a novel's table-of-contents page and collect its chapter links.

    Args:
        url: Index page of the novel. Chapter ``href`` values on the page are
            relative, so they are joined to this URL by plain concatenation
            (a trailing slash is therefore expected).

    Returns:
        A list of ``[chapter_url, chapter_name]`` pairs in page order.
        Returns an empty list if the expected ``div#list`` container is
        missing (e.g. the page layout changed or the request was blocked).

    Raises:
        requests.HTTPError: if the index page request returns an error status.
    """
    header = {"User-Agent":
                  "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3861.400 QQBrowser/10.7.4313.400"}
    req = requests.get(url=url, headers=header, timeout=30)
    req.raise_for_status()  # fail fast instead of parsing an error page
    req.encoding = "gbk"  # the site serves GBK-encoded pages
    bes = BeautifulSoup(req.text, "lxml")
    texts = bes.find("div", id="list")
    if texts is None:
        return []
    words = []  # collected [chapter_url, chapter_name] pairs
    for chapter in texts.find_all("a"):
        href = chapter.get("href")
        name = chapter.string  # anchor text: chapter number + title
        if not href or name is None:
            # skip anchors that are not real chapter links (no href / no text);
            # previously `url + None` would raise TypeError here
            continue
        words.append([url + href, name])
    return words

if __name__ == '__main__':
    # Download every chapter listed on the index page and save each one
    # to ./<chapter name>.txt.
    target = geturl()
    header = {"User-Agent":
                  "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3861.400 QQBrowser/10.7.4313.400"}
    for tar in target:  # tar = [chapter_url, chapter_name]
        req = requests.get(url=tar[0], headers=header, timeout=30)
        req.raise_for_status()  # don't write an error page into the .txt file
        req.encoding = 'gbk'  # chapter pages are GBK-encoded too
        bes = BeautifulSoup(req.text, "lxml")
        texts = bes.find("div", id="content")
        if texts is None:
            # layout changed or the chapter page is empty — skip instead of crashing
            continue
        # paragraphs are separated by runs of four non-breaking spaces
        texts_list = texts.text.split("\xa0"*4)
        # explicit UTF-8 so the output doesn't depend on the platform's locale
        # encoding (the default could fail on Windows for some characters);
        # NOTE(review): chapter names are used as filenames verbatim — assumes
        # they contain no characters invalid in paths, verify for this site
        with open("./" + tar[1] + ".txt", "w", encoding="utf-8") as file:
            for line in texts_list:
                file.write(line + "\n")

