import json
import os
from abc import ABC, abstractmethod

from urllib3.util.url import parse_url

from utils import ask_url


class Spider(ABC):
    """Base class for crawlers: holds shared state and drives the scrape workflow.

    Subclasses implement the site-specific parsing hooks (`_get_title`,
    `_get_author`, `_get_links`, `_get_contents`); this class sequences them.
    (Original author's note: scrapy is said to be more powerful, but this is
    a deliberately simple hand-rolled crawler.)
    """

    def __init__(self, url):
        """Parse the entry-point *url* and initialize bookkeeping attributes.

        :param url: entry-point URL of the work to crawl
        """
        self._url = url
        self._url_res = parse_url(url)
        # scheme://host — useful for resolving relative links found in pages
        self._domain = f'{self._url_res.scheme}://{self._url_res.host}'
        self._links = dict()

        self.title = ''
        self.author = ''
        self.items = []

    def start(self, path):
        """Run the full crawl.

        Fetches the index page, extracts title/author/chapter links, saves
        the metadata JSON, then downloads the contents.

        :param path: base output directory; a subdirectory named after the
                     title is appended when a non-empty title was found
        """
        html = self._get_html(self._url)
        self.title = self._get_title(html)
        self.author = self._get_author(html)
        if self.title != '':
            path = f'{path}/{self.title}'
        self._links = self._get_links(html)
        filename = f'{path}/{self.title}.json'
        self._save_info(filename)
        self._get_contents(path)

    def _get_html(self, url, referer=None):
        """Fetch *url* and return its HTML.

        ``referer`` is accepted so subclasses can override with a version
        that sends one; this default implementation ignores it because
        ``ask_url`` takes only the URL.
        """
        return ask_url(url)

    @abstractmethod
    def _get_title(self, html):
        """Extract and return the work's title from *html* ('' if none)."""

    @abstractmethod
    def _get_author(self, html):
        """Extract and return the author's name from *html*."""

    @abstractmethod
    def _get_links(self, html):
        """Extract and return the chapter links from *html* as a dict."""

    def _save_info(self, filename):
        """Serialize crawl metadata (title/author/url/links) to *filename*
        as pretty-printed UTF-8 JSON.

        :param filename: destination path; parent directories are created
        """
        obj = {'title': self.title, 'author': self.author,
               'url': self._url, 'links': self._links}
        text = json.dumps(obj, ensure_ascii=False, indent=2)
        self._save_to_file(filename, text)
        # BUG FIX: the original f-string contained no placeholder, so it
        # printed a fixed literal instead of the actual file name.
        print(f'已保存配置信息到文件：{filename}')

    @abstractmethod
    def _get_contents(self, path):
        """Download every chapter's content into directory *path*."""

    def _save_to_file(self, filename, text, mode='w'):
        """Write *text* to *filename* as UTF-8, creating parent directories.

        :param filename: destination path (may be a bare name with no directory)
        :param text: string content to write
        :param mode: open mode — 'w' to overwrite (default), 'a' to append
        """
        path = os.path.dirname(filename)
        # exist_ok avoids the check-then-create race; the guard skips
        # makedirs entirely when the filename has no directory component
        # (os.makedirs('') would raise FileNotFoundError).
        if path:
            os.makedirs(path, exist_ok=True)
        with open(filename, mode, encoding='utf-8') as f:
            f.write(text)

    def __format__(self, format_spec=''):
        """Return a one-line human-readable summary (the spec is ignored)."""
        return f'标题：《{self.title}》，作者：{self.author}，章节数：{len(self._links)}'
