'''
Scraper checklist (steps to adapt this script to a new site):
1. Set the name, url, and url_base
(2). (Is each chapter split across multiple pages?)
3. Pick the getTitle template based on whether chapters are paginated
4. Fill in the concrete getTitle code
5. Fill in the concrete getBody code
6. Fill in the concrete get-next-page / get-next-chapter code
7. Fill in the concrete stop-condition code
'''
import os

import requests as rs  # python发网络请求的模块(好用)
from bs4 import BeautifulSoup  # HTML解析


def appendTitle(title: str, url: str):
    """Record a new chapter title and emit a markdown heading for it.

    Appends *title* to the module-level ``title_list``, appends a
    ``# <title>`` heading to the output file at the module-level ``path``,
    and echoes the title plus its source *url* to stdout.
    """
    title_list.append(title)
    heading = f"\n\n# {title}\n"
    with open(path, mode='a', encoding='utf-8') as out:
        out.write(heading)
    print(f"{title}\t{url}")


def filter(contains: str):
    """Scrub scraped chapter HTML: strip whitespace and ad junk, map break tags to newlines.

    Replacements run strictly in order, so literal newlines and spaces are
    removed *before* ``<br>``-style tags are converted into newlines.
    (Note: the name shadows the builtin ``filter``; kept for compatibility
    with existing callers.)
    """
    replacements = (
        ("\xa0", ''),
        ("\u3000", ''),
        (" ", ''),
        ("【笔】【下】【文】【学】", ''),
        ("www.bixiabook.com", ''),
        ("\n", ''),
        ("/p>", '\n'),
        ("<br>", '\n'),
        ("<br/>", '\n'),
        ("</br>", '\n'),
    )
    for old, new in replacements:
        contains = contains.replace(old, new)
    return contains


# from main import Novel_Downloader
# Browser-like User-Agent so the site serves normal pages instead of
# blocking the script as a bot.
headers_ = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'}

# Chapter titles seen so far; the last entry is used to detect chapter changes.
title_list: list = []
# True until the very first page has been processed (forces the first title write).
first_page = True

# Output file: ./files/<file_name>.<file_type>; any existing copy is
# deleted so each run starts from a fresh file.
file_name = "黎明之剑"
file_type = 'txt'
if not os.path.exists(os.path.abspath('./files')):
    os.mkdir(os.path.abspath('./files'))
path = f"./files/{file_name}.{file_type}"
if os.path.exists(path):
    os.remove(path)

# Starting chapter page and the site root used to absolutize relative links.
url = "https://wap.bixiabook.com/42/42222/59616858.html"
url_base = "https://wap.bixiabook.com"

# Fetch and parse the first chapter page.
soup = BeautifulSoup(
    rs.get(url=url, headers=headers_).content, 'html.parser')
print(url+'\t'+soup.original_encoding)

title_content = ''

# Main scrape loop: walk chapter pages by following each page's "next"
# link until that link points back at the book's index page.
while True:
    # Chapter title lives in <span class="title">; everything after the
    # first non-breaking space is dropped — presumably pagination noise
    # such as "(1/3)"; TODO confirm against the live page markup.
    title_content = soup.find(name='span', class_='title').text
    title_content = title_content.split("\xa0")[0]
    if first_page:
        first_page = False
        appendTitle(title_content, url)
    elif title_content != title_list[-1]:  # a changed title marks the start of a new chapter
        appendTitle(title_content, url)

    # Extract the chapter body: prettify() puts one tag/text fragment per
    # line, and the [9:-22] slice strips the page's fixed header/footer
    # markup.  NOTE(review): these slice bounds are tied to this site's
    # exact page layout and will silently break if the template changes.
    buffer = "\n".join([str(x) for x in soup.find(
        name='div', id='chaptercontent').prettify().split("\n")[9:-22]])
    buffer = filter(buffer)
    with open(path, encoding='utf-8', mode='a') as f:
        f.write(buffer)

    # Follow the "next page / next chapter" anchor (id="pt_next") and
    # fetch the page it points to.
    url = soup.find(name='a', href=True, id="pt_next")['href']
    url = f"{url_base}{url}"
    soup = \
        BeautifulSoup(rs.get(url=url, headers=headers_).content, 'html.parser')
    print(f"new url:{url}\t{soup.original_encoding}")

    # Stop condition: on the last chapter the "next" link loops back to
    # the book's table-of-contents page.
    if url == 'https://wap.bixiabook.com/book/42222/':
        print("已完全更新")
        print(title_list)
        break
