#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time    : 2023/9/5 21:45
# @Author  : BadMan
# @FileName: 01_Bs4.py
"""
    三国演义 进行数据爬取并分析
"""
import requests
from bs4 import BeautifulSoup

def getHTMLText(url, headers):
    """Fetch *url* and return its decoded body text, or None on failure.

    :param url: page URL to request
    :param headers: dict of HTTP headers (carries the User-Agent spoof)
    :return: response text on success, None on any network/HTTP error
    """
    try:
        resp = requests.get(url, headers=headers, timeout=10)
        # Raise for 4xx/5xx so they are handled like network failures.
        resp.raise_for_status()
        # Trust the content-sniffed encoding over the (often wrong) header.
        resp.encoding = resp.apparent_encoding
        return resp.text
    except requests.RequestException:
        # Narrowed from a bare except: only catch request-related errors,
        # and return None explicitly instead of falling off the end.
        print("获取响应失败", url)
        return None

def paeserPage(page_text, headers):
    """Parse the catalog page, fetch every chapter, and write them to disk.

    :param page_text: HTML of the chapter-list (catalog) page
    :param headers: HTTP headers forwarded to each detail-page request
    Side effect: writes './sangguo.txt' (one "title:content" line per chapter).
    """
    soup = BeautifulSoup(page_text, 'lxml')
    # 解析章节标题 和 详情页: each <li> under .book-mulu holds one chapter link.
    li_list = soup.select('.book-mulu > ul > li')
    # with-statement guarantees the file is closed even if a fetch raises;
    # the original left fp open forever.
    with open('./sangguo.txt', 'w', encoding='utf-8') as fp:
        for li in li_list:
            title = li.a.string
            detail_url = 'http://www.shicimingju.net.cn/book/' + li.a['href']
            # 详情页 url — may legitimately fail; getHTMLText returns None then.
            detail_page_text = getHTMLText(detail_url, headers)
            if detail_page_text is None:
                # Skip this chapter instead of crashing BeautifulSoup on None
                # (the original's try/except still used detail_soup after a
                # failure, raising NameError).
                print("详情加载失败", detail_url)
                continue
            detail_soup = BeautifulSoup(detail_page_text, 'lxml')
            # 获取 class = chapter_content 的 div 标签
            div_tag = detail_soup.find('div', class_='chapter_content')
            if div_tag is None:
                # Page layout changed or content missing — skip, don't crash.
                print("详情加载失败", detail_url)
                continue
            # 解析到了章节的内容
            content = div_tag.text
            fp.write(title + ":" + content + '\n')
            print(title, '爬取成功')

if __name__ == '__main__':
    # Spoof a desktop Edge browser so the site serves the normal page.
    request_headers = {
        'User-Agent': 'Mozilla/5.0 '
                      '(Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.69'
    }
    # Catalog page listing every chapter of 三国演义.
    catalog_url = "http://shici.yiduiyi.net.cn/book/sanguoyanyi.html"

    # Fetch the catalog, then crawl and persist each chapter.
    catalog_html = getHTMLText(catalog_url, request_headers)
    paeserPage(catalog_html, request_headers)