import requests
import yaml
import common
from common import BeautySoupTool
from common import DateTool


def get_page_text(url):
    """Fetch one page of a chapter at *url*, append its text to the output
    file, and recurse while the pager link says "next page".

    Relies on two module-level globals set in ``__main__``:
      - ``pre_url``: site base URL prefixed to the relative "next" href
      - ``file_name``: path of the text file being appended to

    Recursion stops when the pager link is absent or when its label is
    something other than '下一页' ("next page", as opposed to next chapter).
    """
    soup = BeautySoupTool.BeautySoupTool(url)

    # The pager anchor tells us whether the link leads to the next page
    # of this chapter or to the next chapter.
    child_next = soup.beautySoup.select(
        "body table tr td[class='next'] a[id='pb_next']")
    # Guard BEFORE indexing: the original computed child_next[0] first,
    # which raised IndexError on any page without a pager link.
    if not child_next:
        return
    next_url = pre_url + child_next[0].get("href")
    next_label = child_next[0].text

    child_texts = soup.beautySoup.select("body div div[id='nr'] div[id='nr1']")
    if child_texts:  # skip pages with no body container instead of crashing
        body_text = child_texts[0].text.replace(' ', '\n')
        with open(file_name, 'a+', encoding='utf-8') as f:
            f.write(body_text + '\n')

    if next_label == '下一页':  # '下一页' means "next page"
        get_page_text(next_url)

if __name__ == '__main__':
    # Base URL that get_page_text prefixes to relative "next" links.
    pre_url = 'http://m.wxshuku.la/'
    url = 'https://m.swzw.la/254/254363/45736793_3.html'

    # Fetch the starting page and locate the chapter body.
    page = BeautySoupTool.BeautySoupTool(url)
    chapter_nodes = page.beautySoup.select("body div[id='chaptercontent']")

    # The output file is named after the page title (chapter title).
    title = page.title
    file_name = '%s.txt' % (title)

    # Strip spaces from the chapter text and append it to the output file.
    cleaned = chapter_nodes[0].text.replace(' ', '')
    with open(file_name, 'a+', encoding='utf-8') as out:
        out.write(cleaned + '\n')
