from pyquery import PyQuery as pq
import urllib.parse
from module.Function import *
import os
import re


class Parser(object):
    """Parse an HTML page and download its static resources.

    Collects stylesheet, image, and script URLs referenced by the page,
    downloads them into ``basedir`` via ``downloadfile`` (imported from
    ``module.Function``), and additionally extracts image URLs referenced
    from inside downloaded CSS files.
    """

    # Matches url(...) references inside CSS text; compiled once instead
    # of per-<link> as the original did.
    _CSS_URL_RE = re.compile(r"url\((.*?)\)")

    def _accept(self, url):
        """Return True when *url* should be fetched.

        Accepted: a relative-looking URL (no 'http') that contains a dot
        (likely a file name), or any URL mentioning 'tv.81.cn'.
        Rejects None and very short values — the original crashed with
        ``TypeError`` on tags missing the attribute (href/src is None).
        """
        if url is None or len(url) < 5:
            return False
        if 'http' not in url and '.' in url:
            return True
        return 'tv.81.cn' in url

    def _collect_css_images(self, css_url, filepath, new_urls):
        """Read the downloaded CSS file at *filepath* and add every
        ``url(...)`` reference, resolved against *css_url*, to *new_urls*.

        Unreadable / non-UTF-8 files are skipped best-effort (path is
        printed), matching the original behavior.
        """
        content = ''
        if os.path.isfile(filepath):
            try:
                with open(filepath, 'r', encoding='utf-8') as f:
                    content = f.read()
            except Exception:
                print(filepath)
        for ref in self._CSS_URL_RE.findall(content):
            # CSS url() values may be quoted with either quote style.
            ref = ref.strip('"').strip("'")
            new_urls.add(urllib.parse.urljoin(css_url, ref))

    def paser(self, page_url, html_cont, basedir):
        """Parse *html_cont* (the HTML of *page_url*).

        Downloads accepted resources into *basedir* and returns the tuple
        ``(new_urls, html_cont)`` where ``new_urls`` is the set of
        absolute resource URLs that were queued for download.
        (Method name 'paser' kept for caller compatibility.)
        """
        doc = pq(html_cont)
        new_urls = set()

        # Stylesheets are downloaded immediately so that image URLs can
        # be extracted from the CSS text before the final download pass.
        for link in doc('link').items():
            href = link.attr.href
            if not self._accept(href):
                continue
            full_url = urllib.parse.urljoin(page_url, href)
            filepath = downloadfile(full_url, basedir)
            if os.path.splitext(filepath)[1] != '.css':
                continue
            self._collect_css_images(full_url, filepath, new_urls)

        # Images and scripts share identical handling; queue them and
        # download everything in one pass at the end.
        for selector in ('img', 'script'):
            for el in doc(selector).items():
                src = el.attr.src
                if not self._accept(src):
                    continue
                new_urls.add(urllib.parse.urljoin(page_url, src))

        for url in new_urls:
            downloadfile(url, basedir)

        return new_urls, html_cont
