import logging
import os
import re

import yaml
from bs4 import BeautifulSoup

import common
from common import BeautySoupTool
from common import FileTool


# Download the text of one page and append it to *file_name*,
# following "next page" links recursively.
def get_page_text(url, file_name, encoding):
    """Fetch the page at *url*, clean its text, and append it to *file_name*.

    While the "next" anchor is labelled ' 下 页 ' the chapter continues on
    another page, so the function recurses into that link.  Relative links
    are absolutized with the module-level ``pre_url``.  UnicodeEncodeError
    from the file write is logged and re-raised.
    """
    soup = BeautySoupTool.get_beautysoup(url)
    # The 'pb_next' anchor is either "next page" or "next chapter".
    child_next = soup.beautySoup.select("body table tr td[class='next'] a[id='pb_next']")
    if len(child_next) > 0:
        # Bug fix: the original dereferenced child_next[0] BEFORE this
        # length check, so an empty result raised IndexError and the
        # "no next page" branch below was unreachable.
        next_url = child_next[0].get("href")
        # get("href") returns None when the attribute is missing.
        if next_url and not next_url.startswith('https'):
            next_url = pre_url + next_url
        next_ = child_next[0].text
        child_texts = soup.beautySoup.select("body div div[id='nr'] div[id='nr1']")
        text = child_texts[0].text
        # Collapse runs of spaces into a single space.
        text = re.sub(r' +', ' ', text)
        # Strip junk characters left over from bad encodings.
        # Bug fix: the pattern used to begin with '|' (an empty first
        # alternative) which matches the empty string at every position,
        # so the real alternatives after it never matched at all.
        text = re.sub(r'\�+|�+|\？+|≈+|ot;+|&ap;+', '', text)
        # Strip the site's "to be continued" boilerplate banners.
        text = re.sub(r'本章未完请翻开下方下一章继续阅读+|-335本章未完请翻开下方下一章继续阅读+|(责任编辑admin)+|（本章未完，请点击下一页继续阅读）+', '', text, flags=re.UNICODE)

        text = common.replace_special_char(text)

        text = FileTool.filter_unencodable(text)
        try:
            with open(file_name, 'a+', encoding=encoding) as f:
                f.write(text + '\n')
        except UnicodeEncodeError as e:
            logging.info(e)
            raise e

        if next_ == ' 下 页 ':
            logging.warning('获取下一页文本：{}'.format(next_url))
            get_page_text(next_url, file_name, encoding)
    else:
        logging.warning("无下一页")


# Pick the "next page" (下一页) and "last page" (尾页) links out of a list
# of anchor tags scraped from a catalogue page.
def get_next_and_end_page(next_page):
    """Return ``[next_page_url, end_page_url]`` extracted from *next_page*.

    Anchors whose text is neither '下一页' nor '尾页' are ignored; if the
    same label occurs more than once, the last anchor wins.  Relative hrefs
    are absolutized with the module-level ``pre_url``.
    """
    found = {'下一页': None, '尾页': None}
    for anchor in next_page:
        label = anchor.text
        if label not in found:
            continue
        href = anchor.get('href')
        if not href.startswith('https'):
            href = pre_url + href
        found[label] = href
    return [found['下一页'], found['尾页']]


# Collect the chapter anchors of a catalogue page, recursing into the
# "next page" link until the catalogue is exhausted.
def get_chapter_list(page_url, file_name, encoding, chapter_list):
    """Accumulate chapter anchors from *page_url* into *chapter_list*.

    Returns ``(file_name, encoding, chapter_list)``; when *file_name* /
    *encoding* are None they are taken from the first fetched page (title
    and detected charset).  *chapter_list* is mutated in place and also
    returned.
    """
    if page_url in (None, ''):
        return file_name, encoding, chapter_list
    logging.info('开始获取【{}】章节列表'.format(page_url))
    soup = BeautySoupTool.get_beautysoup(page_url)

    file_name = soup.title if file_name is None else file_name
    encoding = soup.chardet_encoding if encoding is None else encoding
    chapter_list = [] if chapter_list is None else chapter_list
    chapters = soup.beautySoup.select("body div[class='cover'] ul[class='chapter'] a")

    # Skip pages whose anchors are all already collected (loop protection).
    if all(item in chapter_list for item in chapters):
        logging.warning("当前页面【{}】获取的章节列表已存在！不在重复度获取".format(page_url))
        return file_name, encoding, chapter_list

    logging.warning("【{}】 获取的章节列表数量：{}".format(page_url, len(chapters)))
    chapter_list.extend(chapters)
    # A full catalogue page holds at least 20 entries; fewer means last page.
    if len(chapters) >= 20:
        pager = soup.beautySoup.select("body div[class='page'] a")
        next_page_url, end_page_url = get_next_and_end_page(pager)
        if next_page_url in (None, ''):
            logging.warning(
                "当前页【{}】获取不到下一页链接！结束获取！next_page_url：{}".format(page_url, next_page_url))
        elif next_page_url == page_url:
            logging.warning(
                '【{}】当前已是最后一页：next_url:{}; end_page_url:{}'.format(page_url, next_page_url, end_page_url))
        else:
            get_chapter_list(next_page_url, file_name, encoding, chapter_list)
    return file_name, encoding, chapter_list


# Download a single novel given its catalogue url.  Overwrite behaviour is
# controlled by the module-level flag ``is_cover`` (True: truncate an
# existing file, False: derive a fresh file name).
def down_one_from_url(url):
    """Download every chapter listed at *url* into one local text file.

    NOTE(review): reads the globals ``is_cover`` and ``pre_url``;
    ``is_cover`` is only assigned inside the ``__main__`` guard, so calling
    this function after importing the module raises NameError -- confirm
    this is intended.  On a per-chapter failure the not-yet-downloaded
    anchors are written to ``<title>.log`` and the exception is re-raised;
    a summary line is always appended to ``Done-text.log``.
    """
    state = 'finished'  # overall execution state, recorded in Done-text.log
    title, encoding, chapter_list = get_chapter_list(url, None, None, None)
    total = len(chapter_list)
    if is_cover:
        file_name = '%s.%s' % (title, 'txt')
        # Truncate the file so chapters are appended from scratch.
        with open(file_name, 'w', encoding=encoding) as f:
            f.write('')

    else:
        # Picks a non-clashing file name in the current working directory.
        file_name = FileTool.check_file_exist(os.getcwd(), title, 'txt')
    try:
        for index, ahref in enumerate(chapter_list, 1):
            # child_url = ahref.name("href")
            child_url = ahref.get("href")
            if not child_url.startswith('https'):
                child_url = pre_url + child_url
            logging.info("获取第{}/{} 个链接 ：{}".format(index, total, child_url))
            try:
                get_page_text(child_url, file_name, encoding)
            except Exception as e:
                logging.error(e)
                logging.info("获取第{}/{} 个链接出错！ ：{}；e :{}".format(index, total, child_url, e))
                # Save the links that have not been downloaded yet.
                chapter_list = chapter_list[index - 1:]
                logging.info("保存出错链接！ ：{}".format(chapter_list))
                with open(title + ".log", 'w', encoding='utf-8') as f:
                    f.write('\n'.join(map(str, chapter_list)))  # one anchor per line
                raise e
    except Exception as e:
        state = 'failed'
        raise e
    finally:
        # Always record the outcome, success or failure alike.
        with open('Done-text.log', 'a+', encoding=encoding) as f:
            f.write('file_name: {} ; url: {} ;  state: {}'.format(file_name, url, state) + '\n')
        if state == 'failed':
            logging.info("文件【{}】下载失败！url:{}".format(file_name, url))
        else:
            logging.info("文件【{}】下载完毕；url:{}".format(file_name, url))


# Download the text for every entry in the module-level ``file_list``.
def down_multy_from_url():
    """Iterate ``file_list`` and download each url via down_one_from_url."""
    total = len(file_list)
    logging.warning("url总数量：{}".format(total))
    for position, entry in enumerate(file_list, 1):
        target_url = entry.get("url")
        logging.info("第 {}/{} 个 文件【{}】 开始下载！链接 {}，".format(position, total, entry.get("name"), target_url))
        down_one_from_url(target_url)
        logging.info("第 {}/{} 个 文件【{}】 下载完成！链接 {}，".format(position, total, entry.get("name"), target_url))


# Re-download the pages listed in an error-log file written by
# down_one_from_url (one stringified <a> tag per line).
def down_from_log(log_file, file_name, encoding):
    """Retry every anchor stored in *log_file*, appending text to *file_name*.

    On a download failure, *log_file* is rewritten with the failing link and
    every link after it, then the exception is re-raised so a later run can
    resume.  On full success the log file is left untouched (same behaviour
    as the original).
    """
    with open(log_file, 'r', encoding='utf-8') as f:
        read_lines = f.readlines()
    total_line = len(read_lines)
    remaining = []  # links still pending when an error aborts the loop
    try:
        for index, line in enumerate(read_lines, 1):
            line = line.strip('\n')
            soup = BeautifulSoup(line, 'lxml')
            a_tag = soup.a
            child_url = a_tag.get("href")
            if not child_url.startswith('https'):
                child_url = pre_url + child_url
            try:
                logging.info('获取第 {}/{}条文本：{}'.format(index, total_line, child_url))
                get_page_text(child_url, file_name, encoding)
            except Exception as e:
                logging.error('第 {}/{}条下载失败！'.format(index, total_line))
                # Bug fix: slice the ORIGINAL line list.  The old code set
                # origin_data = [] after every successful line, so a later
                # failure sliced an empty list and lost all remaining links.
                remaining = read_lines[index - 1:]
                raise e
    finally:
        # Bug fix: the rewrite must run even though the exception above is
        # re-raised; previously the rewrite sat after the loop and was
        # unreachable on failure, so progress was never saved.
        if len(remaining) > 0:
            with open(log_file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(remaining))


# Reserved for mapping already-processed files (currently unused).
file_map = [
]
# NOTE(review): leftover fragment of another novel title -- safe to delete.
# Each entry: 'name' is a display label, 'url' is the catalogue page.
file_list = [
    {'name': '沉欲之小西的美母教师（1-29章+特刊）全文阅读目录', 'url': 'https://m.mahuaxs.com/30/30473/'}
    , {'name': '沉欲之小西的美母教师全文阅读目录', 'url': 'https://m.mahuaxs.com/30/30733/'}

]

# Base url prepended to relative links found in scraped pages.
pre_url = 'https://m.mahuaxs.com/'

if __name__ == '__main__':
    is_cover = False  # whether to overwrite an already-existing output file
    # get_page_text('https://m.mahuaxs.com/29/29614/297657.html', 'test_specialchar2.txt', 'gbk')
    # down_one_from_url('https://m.mahuaxs.com/30/30473/')
    down_multy_from_url()
    pass
