#!/usr/bin/env python
# -*-coding:UTF-8 -*-
'''
@Project ：爬虫-波波老师
@File：12-bs4-sanguo.py
@Author ：文非
@Date：2021/3/10 21:45
@require:爬取三国演义的所有章节和内容 https://www.shicimingju.com/book/sanguoyanyi.html
'''
import requests
from bs4 import BeautifulSoup
import HTMLParser
import lxml

if __name__ == "__main__":
    # Scrape every chapter (title + body text) of "Romance of the Three
    # Kingdoms" from shicimingju.com and append them to one local text file.

    # Spoof a desktop-browser User-Agent so the site serves the page normally.
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
    }

    # 1. Fetch the table-of-contents page (raw bytes; BeautifulSoup handles
    #    the decoding itself).
    url = "https://www.shicimingju.com/book/sanguoyanyi.html"
    page_text = requests.get(url=url, headers=headers).content

    # 2. Parse the chapter list: each <li> under the .book-mulu nav holds an
    #    <a> whose text is the chapter title and whose href is the relative
    #    path of the chapter's detail page.
    soup = BeautifulSoup(page_text, "lxml")
    li_list = soup.select(".book-mulu>ul>li")

    # 3. Visit each chapter page and persist "title:content" per chapter.
    #    Using `with` fixes the original resource leak: the file handle was
    #    opened but never closed, so a failed request mid-loop could lose
    #    buffered output.
    with open("./三国演义.txt", "w", encoding="utf-8") as fp:
        for li in li_list:
            title = li.a.string
            # 4. Build the absolute URL of the chapter detail page.
            detail_url = "https://www.shicimingju.com" + li.a["href"]
            # 5. Fetch and parse the detail page.
            detail_page = requests.get(url=detail_url, headers=headers).content
            detail_soup = BeautifulSoup(detail_page, "lxml")
            # 6. The chapter body lives in <div class="chapter_content">.
            content = detail_soup.find("div", class_="chapter_content").text
            # 7. Persist one chapter per write.
            fp.write(title + ":" + content + "\n")
            print(title, "爬取成功")
