import re
import unicodedata
import json
import os
from bs4 import BeautifulSoup
from urllib3.util.url import parse_url
from utils import ask_url


class Book:
    """Base class for novel scrapers.

    Fetches a novel's index page, extracts the title, author and chapter
    links, then downloads each chapter and saves it to disk (one .txt per
    chapter plus one combined .txt and a JSON metadata file).
    """

    def __init__(self, url):
        """Store the index-page URL and derive the site domain.

        Args:
            url: URL of the novel's index page (expected to end in
                'index.html' — see _get_article_urls).
        """
        self._url = url
        res = parse_url(url)
        self._domain = f'{res.scheme}://{res.host}'
        self._article_urls = dict()  # chapter title -> chapter URL

        self.title = ''
        self.author = ''
        self.articles = []  # list of {'title': ..., 'content': ...} dicts

    def init(self, path):
        """Fetch book metadata and download every chapter under `path`.

        Creates a per-book sub-directory named after the title, writes a
        JSON info file, one .txt per chapter, and appends each chapter to
        a combined .txt. Chapters whose files already exist are skipped
        (resume support); skipped chapters are NOT added to self.articles.

        Args:
            path: base directory to save the book under.
        """
        print('正在初始化小说信息...')
        print('小说地址:', self._url)
        html = ask_url(self._url)
        self.title = self._get_title(html)
        # os.path.join keeps paths portable (original hard-coded '\\').
        path = os.path.join(path, self.title)
        self.author = self._get_author(html)
        self._article_urls = self._get_article_urls(html)
        filename = os.path.join(path, f'{self.title}.json')
        self._save_book_info(filename)

        for title, url in self._article_urls.items():
            filename = os.path.join(path, f'{title}.txt')
            if os.path.exists(filename):
                print(f'已存在文件：{filename}')
                continue
            print('正在获取章节：', title, '，地址：', url)
            article = self._get_article_content(title, url)
            self.articles.append(article)
            self._save_article(filename, article)

        print('初始化完成！')

    def _get_title(self, html):
        """Extract the novel title from the index page (first <h2> tag)."""
        soup = BeautifulSoup(html, 'html.parser')
        title = soup.find('h2').text
        return title

    def _get_author(self, html):
        """Extract the author name from the index page.

        Parses the first <span> inside div.info > div.small and strips the
        leading '作者：' ("author:") label.
        """
        soup = BeautifulSoup(html, 'html.parser')
        author = soup.find('div', class_='info').find(
            'div', class_='small').find_all('span')[0].text.split('作者：')[1]
        return author

    def _get_article_urls(self, html):
        """Build an ordered mapping of chapter title -> chapter URL.

        Returns:
            dict mapping normalized chapter titles to absolute URLs.
        """
        soup = BeautifulSoup(html, "html.parser")
        # NOTE(review): the first 12 <dd> entries appear to be a
        # "latest chapters" duplicate list on this site layout — confirm
        # against the target site before changing the slice.
        _list = soup.find('div', class_='listmain').find_all('dd')[12:]
        _dict = dict()
        for dd in _list:
            a = dd.find('a')
            title = a.text
            # Normalize titles: ensure a space after '章', and replace
            # characters that are illegal in Windows filenames.
            if title.find('章 ') == -1:
                title = title.replace('章', '章 ')
            title = title.replace('?', '？').replace(':', '：')
            # Chapter hrefs are relative to the index page's directory.
            url = self._url.replace('index.html', '') + a['href']
            _dict[title] = url
        return _dict

    def _get_article_content(self, title, url):
        """Download one chapter and return it as a dict.

        Args:
            title: chapter title (stored verbatim in the result).
            url: absolute chapter URL.

        Returns:
            {'title': title, 'content': cleaned chapter text}.
        """
        html = ask_url(url)
        soup = BeautifulSoup(html, 'html.parser')
        content = soup.find('div', id='content').text
        content = self._remove_content_invalid_chars(content)
        # NFKC folds full-width/compatibility characters; CRs become LFs.
        content = unicodedata.normalize('NFKC', content).replace('\r', '\n')
        return {'title': title, 'content': content}

    def _remove_content_invalid_chars(self, content):
        """Strip site watermarks (embedded URLs / 'remember us' lines)."""
        content = re.sub(r'https.*html', '', content)
        content = re.sub(r'请记住.*com', '', content)
        return content

    def _save_book_info(self, filename):
        """Write title, author and the chapter-URL map as JSON to `filename`."""
        obj = {
            'title': self.title,
            'author': self.author,
            'article_urls': self._article_urls
        }
        json_str = json.dumps(obj, ensure_ascii=False, indent=2)
        self._save_to_file(filename, json_str)
        print(f'小说信息保存到文件：{filename}')

    def _save_article(self, filename, article):
        """Save one chapter to its own file and append it to the combined file.

        Args:
            filename: path of the per-chapter .txt file.
            article: {'title': ..., 'content': ...} dict.
        """
        title = article['title']
        text = f"\n{title}\n{article['content']}\n"
        self._save_to_file(filename, text)
        print(f'章节《{title}》保存到文件：{filename}')
        # Also append to a single <book-title>.txt holding the whole book.
        path = os.path.dirname(filename)
        filename = os.path.join(path, f'{self.title}.txt')
        self._save_to_file(filename, text, 'a')
        print(f'章节《{title}》追加到文件：{filename}')
        print('----------------------------------------')

    def _save_to_file(self, filename, text, mode='w'):
        """Write `text` to `filename` (UTF-8), creating parent dirs as needed.

        Args:
            filename: target file path.
            text: content to write.
            mode: open() mode, 'w' to overwrite or 'a' to append.
        """
        path = os.path.dirname(filename)
        # Guard the empty-dirname case (bare filename) — os.makedirs('')
        # raises; exist_ok avoids a check-then-create race.
        if path:
            os.makedirs(path, exist_ok=True)
        with open(filename, mode, encoding='utf-8') as f:
            f.write(text)

    def __format__(self, format_spec: str = '') -> str:
        """Return a one-line summary: title, author and chapter count.

        The format spec is ignored; format() passes '' by default.
        """
        return f'书名：《{self.title}》，作者：{self.author}，章节数：{len(self.articles)}'