import requests as req;
import os;
import time;
import lxml;
from bs4 import BeautifulSoup;

if __name__ == '__main__':

    # stdlib; joins the site root with the per-chapter hrefs safely
    from urllib.parse import urljoin

    # Directory the scraped book is written into; create it on first run.
    file_name = "./bug_file/三国演义/"
    if not os.path.exists(file_name):
        os.makedirs(file_name)

    # Table-of-contents page listing every chapter of the novel.
    url = "https://www.shicimingju.com/book/sanguoyanyi.html"

    # Spoofed browser identity so the site serves the normal desktop page.
    header = {
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
        "Mobile-User-Agent":"Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Mobile Safari/537.36"
    }

    # One Session reuses the TCP connection across the ~120 chapter requests.
    session = req.Session()
    session.headers.update(header)

    result = session.get(url, timeout=10)
    result.raise_for_status()  # fail loudly instead of parsing an error page
    # The server does not declare UTF-8, so requests would decode the body as
    # ISO-8859-1; overriding .encoding replaces the original
    # .encode("ISO-8859-1").decode("utf-8") round-trip with the direct fix.
    result.encoding = "utf-8"

    # Parse the TOC and collect one <a> per chapter (title + relative href).
    bs_obj = BeautifulSoup(result.text, 'lxml')
    chapter_links = bs_obj.select("#main_left > div > div.book-mulu > ul > li > a")

    with open(file_name + "三国演义.txt", "w", encoding="utf-8") as wf:

        for a in chapter_links:
            title = a.string  # chapter title is the anchor's only text node

            # urljoin avoids the double slash that plain concatenation produced
            # ("https://www.shicimingju.com/" + "/book/..." -> ".com//book/...").
            chapter_url = urljoin("https://www.shicimingju.com/", a['href'])

            # Fetch the chapter page itself.
            resp = session.get(chapter_url, timeout=10)
            resp.raise_for_status()
            resp.encoding = "utf-8"  # same charset fix as for the TOC page

            chapter_obj = BeautifulSoup(resp.text, 'lxml')

            # Chapter body text (the card <div> under the bookmark list).
            count = chapter_obj.select("#main_left > div.card.bookmark-list > div")[0].text

            # "\n" (was a bare "\r") so each chapter ends with a real newline.
            wf.write(title + " : " + count + "\n")
            print(title, "爬取成功!!")

            time.sleep(0.2)  # small delay: be polite to the server

    print("爬取完毕!!")
