import re
import unicodedata

from bs4 import BeautifulSoup

from books.book_spider import BookSpider
from utils import ask_url, replace_filename_invalid_chars


class ZonghengComSpider(BookSpider):
    '''Spider for novels hosted on zongheng.com (纵横中文网).

    Implements the ``BookSpider`` parsing hooks: extracting the book
    title, author, chapter links, and per-chapter content from the
    site's HTML pages.
    '''

    # Patterns compiled once at class-creation time instead of on every
    # call (the originals re-compiled them per invocation inside methods).
    _WHITESPACE_RE = re.compile(r'\s+')
    # Greedy, per-line ('.' does not cross newlines): strips inline links
    # and "请记住…com" anti-piracy notices injected into chapter bodies.
    _URL_RE = re.compile(r'https.*html')
    _AD_RE = re.compile(r'请记住.*com')

    def _get_title(self, html):
        '''Return the book title (text of the first ``<h1>``) from the
        book index page HTML.'''
        soup = BeautifulSoup(html, 'html.parser')
        return soup.find('h1').text

    def _get_author(self, html):
        '''Return the author name (first ``<a>`` inside the
        ``book-meta`` div) from the book index page HTML.'''
        soup = BeautifulSoup(html, 'html.parser')
        return soup.find('div', class_='book-meta').find('a').text

    def _get_links(self, html):
        '''Parse the chapter list from the book index page HTML.

        Returns a dict mapping a cleaned, filesystem-safe chapter title
        to its chapter-page URL, in document order.
        '''
        soup = BeautifulSoup(html, 'html.parser')
        links = {}
        for chapter_list in soup.find_all('ul', class_='chapter-list clearfix'):
            for item in chapter_list.find_all('li'):
                anchor = item.find('a')
                title = anchor.text
                # Normalize titles so a space always follows the chapter
                # marker ('章'), then collapse whitespace runs to one space.
                if title.find('章 ') == -1:
                    title = title.replace('章', '章 ')
                title = self._WHITESPACE_RE.sub(' ', title)
                title = replace_filename_invalid_chars(title)
                links[title] = anchor['href']
        return links

    def _remove_content_invalid_chars(self, content):
        '''Strip whitespace artifacts and embedded ads from chapter text.'''
        # The original chain mapped NBSP ('\xa0') to a space and then
        # removed 4-space, 2-space, and single-space runs — i.e. it
        # deleted every ASCII space. One equivalent pass each:
        content = content.replace('\xa0', ' ').replace(' ', '')
        content = self._URL_RE.sub('', content)
        content = self._AD_RE.sub('', content)
        return content

    def _get_content(self, title, html):
        '''Return ``{'title': ..., 'content': ...}`` for one chapter page.

        Joins every ``<p>`` in the ``content`` div with newlines, cleans
        the text, and NFKC-normalizes it (folds full-width characters);
        stray '\\r' become '\\n'.
        '''
        soup = BeautifulSoup(html, 'html.parser')
        paragraphs = soup.find('div', class_='content').find_all('p')
        # ''.join instead of '+=' in a loop: linear, not quadratic.
        content = ''.join(p.text + '\n' for p in paragraphs)
        content = self._remove_content_invalid_chars(content)
        content = unicodedata.normalize('NFKC', content).replace('\r', '\n')
        return {'title': title, 'content': content}
