from pyquery import PyQuery as pq
import urllib.parse


class HtmlParser(object):
    """Extract follow-up page URLs and image URLs from tv.81.cn pages.

    Every ``_get_*`` method receives the current page URL and a parsed
    PyQuery document and returns ``(new_urls, src_list)``: two sets of
    absolute URLs — pages to crawl next and images to download.
    ``paser`` dispatches to the right extractor based on the URL pattern.
    """

    def _collect_links(self, page_url, doc, selector):
        """Return the set of absolute hrefs under *selector* worth following.

        A link is kept when it is relative (no scheme, but contains a dot,
        e.g. ``page_2.htm``) or when it points inside ``tv.81.cn``.
        Relative links are resolved against *page_url*.
        """
        urls = set()
        for link in doc(selector).items():
            href = link.attr('href')
            # Guard against missing/empty href. The original compared the
            # bound method ``href.__len__`` to 0 (always False) and crashed
            # with AttributeError when PyQuery returned None.
            if not href:
                continue
            relative = href.find('http') == -1 and href.find('.') > -1
            if relative or href.find('tv.81.cn') > -1:
                urls.add(urllib.parse.urljoin(page_url, href))
        return urls

    def _collect_imgs(self, page_url, doc, selector):
        """Return the set of absolute img srcs under *selector*.

        An image is kept when its src is relative (no scheme) or hosted on
        ``tv.81.cn``; relative srcs are resolved against *page_url*.
        """
        srcs = set()
        for img in doc(selector).items():
            src = img.attr('src')
            if not src:  # same None/empty guard as _collect_links
                continue
            if src.find('http') == -1 or src.find('tv.81.cn') > -1:
                srcs.add(urllib.parse.urljoin(page_url, src))
        return srcs

    def _get_index_urls(self, page_url, doc):
        """Extract links/images from the whole index page (all <a>/<img>)."""
        return (self._collect_links(page_url, doc, 'a'),
                self._collect_imgs(page_url, doc, 'img'))

    def _get_node_urls(self, page_url, doc):
        """Extract links/images from a node (listing) page's .content area,
        printing a progress trace of the discovered links."""
        new_urls = self._collect_links(page_url, doc, '.content a')
        # Progress trace kept from the original implementation.
        print(page_url+"—— "+'|'.join(new_urls))
        src_list = self._collect_imgs(page_url, doc, '.content img')
        return new_urls, src_list

    def _get_content_urls(self, page_url, doc):
        """Extract links/images from an article (content) page's .content area."""
        return (self._collect_links(page_url, doc, '.content a'),
                self._collect_imgs(page_url, doc, '.content img'))

    def paser(self, page_url, html_cont):
        """Parse *html_cont* fetched from *page_url*.

        Returns ``(new_urls, src_list)`` chosen by the URL pattern
        (node_ / content_ / the 2018zt index), or ``(None, None)`` when
        the input is missing, off-site, or matches no known pattern.
        NOTE: method name "paser" (sic) is kept for caller compatibility.
        """
        if page_url is None or html_cont is None or page_url.find('81.cn') == -1:
            return None, None
        doc = pq(html_cont)
        if page_url.find('node_') > -1:
            return self._get_node_urls(page_url, doc)
        if page_url.find('content_') > -1:
            return self._get_content_urls(page_url, doc)
        if page_url.find('2018zt/103248.htm') > -1:
            return self._get_index_urls(page_url, doc)
        return None, None
