import re
import unicodedata

from bs4 import BeautifulSoup
from urllib3.util.url import parse_url

from books.book_spider import BookSpider
from utils import ask_url
from utils import replace_filename_invalid_chars


class D1zwComSpider(BookSpider):
    """Spider for the 第一中文网 (d1zw.com) web-novel site.

    Implements the site-specific hooks of ``BookSpider``: fetching pages
    with browser-like headers, and extracting the book title, author,
    chapter links and chapter content from the site's HTML.
    """

    # Patterns that strip in-page advertising from chapter text.
    # NOTE(review): both are greedy but line-scoped ('.' does not match
    # newlines), matching the original inline patterns exactly.
    _URL_AD_RE = re.compile(r'https.*html')
    _REMEMBER_AD_RE = re.compile(r'请记住.*com')

    def _get_html(self, url, referer=None):
        """Fetch *url* and return its HTML.

        Args:
            url: Absolute URL of the page to download.
            referer: Value for the ``Referer`` header; defaults to the
                site root of *url* so the request looks like an in-site
                click.

        Returns:
            Whatever ``ask_url`` returns for the page (the HTML document).
        """
        uri = parse_url(url)
        if referer is None:
            referer = f"{uri.scheme}://{uri.host}/"
        # Browser-like headers so the site does not reject the request.
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36",
            "Accept-Encoding": "gzip, deflate, br",
            "Host": uri.host,
            "Referer": referer
        }
        return ask_url(url, headers=headers)

    def _get_title(self, html):
        """Extract the book title (the first ``<h1>``) from the index page."""
        soup = BeautifulSoup(html, 'html.parser')
        return soup.find('h1').text

    def _get_author(self, html):
        """Extract the author name from the book's index page.

        The author line is the 4th ``<p>`` inside ``<div id="info">`` and
        reads "作    者：<name>" (label padded with non-breaking spaces).
        """
        soup = BeautifulSoup(html, 'html.parser')
        author = soup.find('div', id='info').find_all('p')[3].text
        # BUG FIX: html.parser decodes "&nbsp;" entities to U+00A0 before
        # .text is read, so the literal "&nbsp;" form never appears.
        # Strip the decoded form; keep the original literal replace as a
        # backward-compatible fallback.
        author = author.replace('作\xa0\xa0\xa0\xa0者：', '')
        author = author.replace('作&nbsp;&nbsp;&nbsp;&nbsp;者：', '')
        return author

    def _get_links(self, html):
        """Return a mapping of chapter title -> absolute chapter URL.

        Titles are normalized ("第N章Title" -> "第N章 Title") and cleaned of
        filename-invalid characters so they can be used as file names.
        Insertion order follows the chapter list on the page.
        """
        soup = BeautifulSoup(html, 'html.parser')
        links = {}
        for dd in soup.find('div', class_='listmain').find_all('dd'):
            anchor = dd.find('a')
            title = anchor.text
            if '章 ' not in title:
                # Insert a space after "章" so titles format consistently.
                title = title.replace('章', '章 ')
            title = replace_filename_invalid_chars(title)
            links[title] = self._domain + anchor['href']
        return links

    def _remove_content_invalid_chars(self, content):
        """Strip whitespace artifacts and in-page ads from raw chapter text."""
        # U+00A0 first becomes a regular space, then every space is removed
        # (Chinese prose uses no inter-word spaces).  This collapses the
        # original chain of '    '/'  '/' ' replaces, whose net effect was
        # identical.
        content = content.replace('\xa0', ' ').replace(' ', '')
        content = self._URL_AD_RE.sub('', content)
        content = self._REMEMBER_AD_RE.sub('', content)
        return content

    def _get_content(self, title, html):
        """Extract and clean one chapter's text.

        Args:
            title: Chapter title (already cleaned by ``_get_links``).
            html: The chapter page's HTML.

        Returns:
            dict with keys ``'title'`` and ``'content'``.
        """
        soup = BeautifulSoup(html, 'html.parser')
        content = soup.find('div', id='content').text
        content = self._remove_content_invalid_chars(content)
        # NFKC folds full-width/compatibility characters to their canonical
        # forms; '\r' -> '\n' keeps line breaks consistent across platforms.
        content = unicodedata.normalize('NFKC', content).replace('\r', '\n')
        return {'title': title, 'content': content}
