# 官网网址 https://beautifulsoup.readthedocs.io/zh_CN/v4.4.0/
import os  # 为了路径管理的需要

import requests as rs  # python发网络请求的模块(好用)
from bs4 import BeautifulSoup  # HTML解析
# from bs4 import SoupStrainer  # 仅解析部分文档
# Module-level request headers (desktop Chrome user agent).
# NOTE(review): appears unused in this file — Novel_Downloader keeps its own
# identical copy in `header_`; confirm no external caller imports this before
# removing.
headers_ = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/51.0.2704.63 Safari/537.36'}


class Novel_Downloader:
    """Download a web novel chapter by chapter and save it under ./files/.

    Encoding note (kept from the original author): BeautifulSoup detects the
    page encoding automatically — inspect ``soup.original_encoding`` after
    parsing.  If the guess produces mojibake, add that encoding to
    ``exclude_encodings`` and retry; a slightly wrong guess is still mostly
    readable, whereas an explicit ``bytes.decode()`` aborts the whole program
    on the first undecodable byte, so it should be avoided here.
    """

    # Pretend to be a desktop Chrome browser so the site serves normal pages.
    header_ = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
               'Chrome/51.0.2704.63 Safari/537.36'}

    def __init__(self, urls, save_name):
        """Remember the chapter URL list and fetch the first chapter page.

        urls      -- list of chapter URLs; may be None or empty.
        save_name -- base file name for the saved novel; may be None.
        """
        # BUGFIX: store a concrete list so len()/indexing below never see None
        # (the original crashed on len(urls) when urls was None).
        self.urls = list(urls) if urls else []
        self.cur_i = 0
        self.cur = self.urls[0] if self.urls else 'https://www.baidu.com/'
        # Fall back to a placeholder title when no name / no URLs were given.
        self.save_name = ('默认小说名'
                          if save_name is None or self.cur == 'https://www.baidu.com/'
                          else save_name)
        # BUGFIX: derive the path from the (possibly defaulted) self.save_name,
        # not the raw argument — the original produced './files/None'.
        self.save_path = './files/%s' % self.save_name
        # BUGFIX: per-instance accumulators; as class attributes these lists
        # were shared between all instances.
        self.title_list = []
        self.body_list = []
        self.r = rs.get(url=self.cur, headers=self.header_)
        print("共有 %d 个章节" % len(self.urls))

    def get_body(self, exclude_encodings_=None):
        """Return the current chapter's paragraphs as a list of strings.

        exclude_encodings_ -- encodings BeautifulSoup must NOT guess
                              (see class docstring); defaults to [''].
        """
        # BUGFIX: avoid a mutable default argument; [''] is built per call.
        soup = BeautifulSoup(self.r.content, 'html.parser',
                             exclude_encodings=exclude_encodings_ or [''])
        container = soup.find(
            name='div', class_="panel-body content-body content-ext")
        # Drop <br/> separators and bare newlines; strip the nbsp indentation.
        return [str(x).replace('\xa0\xa0', '').replace('\n', '')
                for x in container.contents
                if str(x) != '<br/>' and x != '\n']

    def get_title(self, exclude_encodings_=None):
        """Return the current chapter's title text (also prints the
        encoding BeautifulSoup detected, for manual mojibake checks)."""
        soup = BeautifulSoup(self.r.content, 'html.parser',
                             exclude_encodings=exclude_encodings_ or [''])
        print(soup.original_encoding)
        return soup.find(name='div', class_='panel-heading').text

    def check_name(self, name):
        """Set a new save name/path, or print the current ones when None."""
        if name is not None:
            self.save_name = name
            self.save_path = './files/%s' % name
        else:
            print(self.save_name)
            print(self.save_path)

    def add(self, title_or_body, code):
        """Append `code` to the title list ('title') or body list ('body')."""
        if title_or_body == 'title':
            self.title_list.append(code)
        elif title_or_body == 'body':
            self.body_list.append(code)
        else:
            raise RuntimeError("title_or_body 必须是 str::title 或者 str::body")

    def save(self, type='txt'):
        """Write all collected titles/bodies to ./files/<save_name>.<type>."""
        if not os.path.exists(os.path.abspath('./files')):
            os.mkdir(os.path.abspath('./files'))
        path = self.save_path + '.' + type
        with open(path, encoding='utf-8', mode='w+') as f:
            # BUGFIX: zip stops at the shorter list instead of raising
            # IndexError when a title was stored but its body fetch failed
            # (reachable via the IndexError handler in __main__).
            for title, body in zip(self.title_list, self.body_list):
                f.write(title)
                f.write(body)

    def get_steps(self):
        """Return the total number of chapter URLs."""
        return len(self.urls)

    def next(self):
        """Advance to the next chapter URL and fetch it (no-op past the end)."""
        self.cur_i += 1
        if self.cur_i < len(self.urls):
            self.cur = self.urls[self.cur_i]
            self.r = rs.get(url=self.cur, headers=self.header_)
            print("切换至第 %d 个章节: %s" % (self.cur_i + 1, self.cur))
        else:
            print('超出范围')

    # For novels whose index AND chapter bodies are paginated: crawl page by
    # page, following the rel="next" links until they run out.
    def read_pages(self, name, type, url_start, url_base):
        """Crawl a paginated novel starting at url_start, appending to a file.

        name      -- output file base name under ./files/.
        type      -- file extension (e.g. 'txt', 'md').
        url_start -- absolute URL of the first page.
        url_base  -- site root, prepended to relative rel="next" hrefs.
        """
        count = 1
        title_list = []
        first_page = True

        if not os.path.exists(os.path.abspath('./files')):
            os.mkdir(os.path.abspath('./files'))
        path = './files/' + name + '.' + type
        # BUGFIX: only delete a previous run's output if it actually exists
        # (os.remove on a missing path raises FileNotFoundError).
        if os.path.exists(path):
            os.remove(path)

        url = url_start
        # BUGFIX: fetch the page being iterated (`url`), not `self.cur` —
        # the original re-downloaded the same page on every iteration.
        soup = BeautifulSoup(
            rs.get(url=url, headers=self.header_).content, 'html.parser')
        while True:
            print(url + '\t' + soup.original_encoding)

            if first_page:
                # The first page of each chapter carries the chapter title.
                title = "\n# 第 %d 章 %s\n" % (
                    count,
                    soup.find(name='span', class_='title').text.split(' ', 1)[0])
                with open(path, encoding='utf-8', mode='a') as f:
                    f.write(title)
                title_list.append(title)
                print(title + url)
                first_page = False

            # This page's paragraphs, minus <br/> separators and nbsp indent.
            body_list = [str(x).replace('\xa0', '')
                         for x in soup.find(id='BookText').contents
                         if str(x) != '<br/>']
            # Drops a fixed 5-char prefix of the first paragraph — presumably
            # leftover markup from this site; TODO confirm against a live page.
            body_list[0] = body_list[0][5:]
            with open(path, encoding='utf-8', mode='a') as f:
                f.write('\n\n'.join(body_list))

            next_link = soup.findAll(name='a', href=True, rel='next')[-1]
            url = url_base + next_link['href']
            print("new url:" + url + "\n")

            mode_next = next_link.text
            if mode_next == "下一页":
                pass  # same chapter continues on the next page
            elif mode_next == "下一章":
                first_page = True
                count += 1
            else:
                print("已全部更新")
                print(title_list)
                break
            # BUGFIX (same as above): follow the freshly computed `url`.
            soup = BeautifulSoup(
                rs.get(url=url, headers=self.header_).content, 'html.parser')


if __name__ == "__main__":
    url_index = "https://www.xianqihaotianmi.com/book/32836.html"
    url_base = "https://www.xianqihaotianmi.com"

    # Collect the chapter links from the index page; the first rel="nofollow"
    # anchor is skipped, matching the site's layout.
    index_soup = BeautifulSoup(rs.get(url_index).content, 'html.parser')
    anchors = index_soup.findAll(name='a', href=True, rel="nofollow")
    url_list = [url_base + a['href'] for a in anchors][1:]

    t = Novel_Downloader(urls=url_list, save_name="我师兄实在太稳健了")
    try:
        for chapter_no in range(t.get_steps()):
            # Leading '#' makes the title a level-1 heading in markdown.
            title = '\n\n# 第%d章: %s\n\n' % (
                chapter_no + 1, t.get_title().split("正文卷")[1].strip())
            print(title)
            body = '\n\n'.join(t.get_body())
            t.add(title_or_body='title', code=title)
            t.add(title_or_body='body', code=body)
            t.next()
    except IndexError:
        print("网址：%s 解析异常，请用浏览器检查网页并更新代码" % (t.cur))
        print("如果网页为空，请忽视这条信息\n\n")
    t.save(type='md')
    print(t.title_list)