import re
from bs4 import BeautifulSoup
from urllib import parse

class Rule(object):
    """Parses fiction catalog pages and chapter content out of raw HTML.

    ``url`` is the address the HTML was fetched from; it is used to
    resolve relative chapter links into absolute URLs.
    """

    def __init__(self, url):
        self.url = url

    def resolve_catalog(self, res, selector):
        """Parse a catalog page into a list of chapter descriptors.

        Args:
            res: raw HTML of the catalog page.
            selector: CSS selector matching the chapter anchor elements.

        Returns:
            A list of dicts with keys ``url`` (absolute chapter URL),
            ``text`` (anchor text, may be None) and ``code`` (file name
            of the link target without its extension).
        """
        soup = BeautifulSoup(res, 'html.parser')
        fiction_catalog_list = []

        for fiction_el in soup.select(selector):
            href = fiction_el.attrs.get('href')
            if not href:
                # An anchor without a target cannot be followed; skip it
                # instead of raising KeyError as the original did.
                continue

            # Chapter code = basename of the link path minus extension,
            # e.g. '/book/123.html' -> '123'.  (The original sliced a
            # fixed 5 characters off the end, which assumed '.html'.)
            basename = parse.urlparse(href).path.rsplit('/', 1)[-1]
            code = basename.rsplit('.', 1)[0]

            # urljoin handles absolute URLs, root-relative ('/x') and
            # document-relative ('x.html') links per RFC 3986.  The
            # previous hand-rolled join stripped a hard-coded 10 chars
            # from any '.html' base URL, which was only correct for
            # 5-character file names.
            absolute = parse.urljoin(self.url, href)

            fiction_catalog_list.append(
                {'url': absolute, 'text': fiction_el.string, 'code': code})

        return fiction_catalog_list

    def resolve_content(self, res, selector):
        """Return the plain text of the first element matching ``selector``
        in ``res``, or None when nothing matches."""
        soup = BeautifulSoup(res, 'html.parser')
        content_el = soup.select(selector)
        # soup.select always returns a list, so a truthiness check is
        # sufficient (the old `is not None` test was always true).
        if content_el:
            return self.filter_tags(str(content_el[0]))
        return None

    def resolve_intro(self):
        # Not implemented yet; kept so existing callers do not break.
        pass

    def filter_tags(self, htmlstr):
        """Strip markup from an HTML fragment, keeping readable text.

        CDATA sections, scripts, styles and comments are dropped;
        <br> tags become newlines; all remaining tags are removed;
        blank lines are collapsed; HTML character entities are decoded.
        """
        # Raw strings avoid invalid-escape SyntaxWarnings (Python 3.12+).
        re_cdata = re.compile(r'//<!\[CDATA\[[^>]*//\]\]>', re.I)
        re_script = re.compile(r'<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I)
        re_style = re.compile(r'<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I)
        re_br = re.compile(r'<br\s*?/?>')
        re_h = re.compile(r'</?\w+[^>]*>')
        re_comment = re.compile(r'<!--[^>]*-->')

        s = re_cdata.sub('', htmlstr)        # drop CDATA sections
        s = re_script.sub('', s)             # drop <script> blocks
        s = re_style.sub('', s)              # drop <style> blocks
        s = re_br.sub('\n', s)               # <br> becomes a line break
        s = re_h.sub('', s)                  # drop every remaining tag
        s = re_comment.sub('', s)            # drop HTML comments
        s = re.compile(r'\n+').sub('\n', s)  # collapse blank lines
        return self.replaceCharEntity(s)     # decode character entities

    def replaceCharEntity(self, htmlstr):
        """Decode a small set of named/numeric HTML entities in ``htmlstr``.

        Unknown entities are deleted.  Each replacement re-searches from
        the start of the string, so doubly-encoded entities (such as
        '&amp;gt;') are decoded all the way down — this matches the
        original behavior and must be preserved.
        """
        CHAR_ENTITIES = {'nbsp': ' ', '160': ' ',
                         'lt': '<', '60': '<',
                         'gt': '>', '62': '>',
                         'amp': '&', '38': '&',
                         'quot': '"', '34': '"'}

        re_charEntity = re.compile(r'&#?(?P<name>\w+);')
        sz = re_charEntity.search(htmlstr)
        while sz:
            key = sz.group('name')  # entity name, e.g. 'gt' for '&gt;'
            # dict.get replaces the try/except KeyError of the original:
            # unknown entities are substituted with the empty string.
            htmlstr = re_charEntity.sub(CHAR_ENTITIES.get(key, ''), htmlstr, 1)
            sz = re_charEntity.search(htmlstr)
        return htmlstr

