#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/12/26 22:43
# @Author : George
# ==================================================
# <a href="/guwen/bookv_6dacadad4420.aspx">第一回</a>
# 第一回网址
# https://www.gushiwen.cn/guwen/bookv_6dacadad4420.aspx
# ==================================================

from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

# Catalog page listing every chapter of the novel.
url = "https://www.gushiwen.cn/guwen/book_46653FD803893E4F7F702BCF1F7CCE17.aspx"

# UA spoofing: present a browser User-Agent so the site serves the page.
header = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
}
# Fetch the catalog page; fail fast instead of parsing an error page.
response = requests.get(url, headers=header)
response.raise_for_status()
# Parse the catalog with BeautifulSoup.
soup = BeautifulSoup(response.text, "lxml")
# One <a> per chapter: text is the title, href is the relative detail URL.
a_list = soup.select(".bookcont >ul > span >a")

# `with` guarantees the output file is closed even if a request
# raises partway through the loop (the old open()/close() pair leaked
# the handle on any exception).
with open("三国.txt", "w", encoding="utf-8") as fp:
    # Extract each chapter title and its detail-page URL.
    for tag in a_list:
        title = tag.text
        # urljoin resolves the leading "/" in href correctly; plain string
        # concatenation produced a double slash ("...cn//guwen/...").
        detail_url = urljoin("https://www.gushiwen.cn/", tag["href"])
        # Request the chapter detail page.
        detail_page = requests.get(detail_url, headers=header)
        detail_page.raise_for_status()
        # Parse the chapter body out of the detail page.
        detail_soup = BeautifulSoup(detail_page.text, "lxml")
        # NOTE: taking .text on the whole .contson div loses paragraph
        # breaks (everything lives in <p> tags), so write each <p>
        # on its own line instead.
        # content = detail_soup.find("div",class_="contson").text
        fp.write(title + ":")
        for line in detail_soup.select('.contson > p'):
            fp.write(line.text + "\n")
        fp.write("\n\n")
        print(f"{title}爬取成功")