from bs4 import BeautifulSoup
from urllib import parse
from .default import Rule as DefaultRule


class Rule(DefaultRule):
    """Site-specific scraping rule.

    Overrides only the CSS selectors used by :class:`DefaultRule` to locate
    the chapter catalog and the chapter body on this particular site; all
    actual parsing is delegated to the shared default implementation.
    """

    def __init__(self, url):
        """Store the catalog page URL this rule operates on.

        NOTE(review): does not call super().__init__() — confirm DefaultRule
        has no initialization of its own that is being skipped here.
        """
        # Presumably consumed by the inherited resolve_* helpers when
        # building absolute chapter links — verify against DefaultRule.
        self.url = url

    def resolve_catalog(self, res, selector='#info ul li > a'):
        """Parse catalog page HTML *res* into a chapter list.

        Delegates to the default implementation with this site's selector
        for chapter links.
        """
        return super().resolve_catalog(res, selector)

    def resolve_content(self, res, selector='#content1'):
        """Extract the chapter body from chapter page HTML *res*.

        Delegates to the default implementation with this site's content
        container selector.
        """
        return super().resolve_content(res, selector)

    def resolve_intro(self):
        # No introduction/blurb is scraped for this site.
        pass