import urllib3
import urllib.request
import pathlib
import re
import os
import time
def getContentByurl(url: str = "") -> str:
        '''
        Fetch *url* and return the response body decoded as text.
        @url address to download
        Sleeps 0.3s before every request as crude rate limiting.
        '''
        time.sleep(0.3)
        # Browser-like headers: some sites refuse urllib's default agent.
        header = {
                'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
        }
        request = urllib.request.Request(url, headers=header)
        # Context manager closes the connection (the original leaked the
        # response object); also fixes the misspelled local "reponse".
        with urllib.request.urlopen(request) as response:
                # decode() with no argument assumes UTF-8 — same as original.
                return response.read().decode()
def urlParse(url: str) -> dict:
        '''
        Split *url* into the host/path components used by the downloader.
        (Also used to detect whether a resource lives under the main
        domain or is an external link.)
        @url full URL to parse
        Returns a dict with keys: host, path, parentpath, parentHostPath,
        filename, url, suffix, value, scheme.  URLs with no path (or a
        bare "/") are treated as the site index page ("index.html").
        '''
        # NOTE(review): urllib3's parse_url().host excludes any explicit
        # port, so "host" drops :8080 etc. — confirm that is intended.
        parts = urllib3.util.parse_url(url)
        hosturl = parts.scheme + "://" + parts.host

        # parts.path is falsy when empty, so the original's extra
        # `is not None` test was redundant and has been dropped.
        if parts.path and parts.path != "/":
                pathobj = pathlib.Path(parts.path)
                # URL-encoded parent directory, e.g. "/a/b.css" -> "/a"
                parentUrlBasename = urllib.request.pathname2url(str(pathobj.parent))
                return {
                        "host": hosturl,
                        "path": parts.path,
                        "parentpath": parentUrlBasename,
                        "parentHostPath": hosturl + parentUrlBasename,
                        "filename": pathobj.name,
                        "url": parts.url,
                        "suffix": pathobj.suffix,
                        "value": url,
                        "scheme": parts.scheme
                }
        else:
                # Bare host: synthesize index-page defaults.
                return {
                        "host": hosturl,
                        "path": "",
                        "parentpath": "",
                        "parentHostPath": "",
                        "filename": "index.html",
                        "url": parts.url,
                        "suffix": ".html",
                        "value": url,
                        "scheme": parts.scheme
                }
    

def checkPathUrl(url: str, current: str, isDelPointPath: bool = False):
        '''
        Resolve a resource reference found on a page into its real,
        downloadable address plus the hierarchy information from urlParse().
        @url the (possibly relative) target address to resolve
        @current URL of the page on which *url* appears
        @isDelPointPath whether to collapse dotted path levels, e.g.
        /index.php/a/b.html resolves to /a/b.html
        '''
        '''
        A reference can take one of these forms:
        //   protocol-relative: inherit the page's scheme (http vs https)
        /    absolute path on the same host
        ../  path relative to the page's directory
        http already absolute (external link or full URL)
        '''

        currentObj = urlParse(current)
        if url[0:2] == "//":
                url = currentObj.get("scheme") + ":" + url
        urlHostUrlisNo = re.match('^(http|https)://.+$', url)

        loadurl = {}

        if urlHostUrlisNo is None:

                # Split off any leading "../" run: parentJoin climbs the
                # tree, realPath is what remains after the last "../".
                parentJoin = ""
                realPath = url
                lastfindindex = url.rfind("../")
                if lastfindindex > -1:
                        parentJoin = url[:lastfindindex+2]
                        realPath = url[lastfindindex+2:]
                ppath: str = currentObj.get("parentpath")
                # Some sites use virtual paths, so the page path may not
                # match the real layout (e.g. index/ actually maps to /).
                if isDelPointPath:
                        # An absolute reference ("/...") is rooted at the
                        # host, not the page's directory.  (The original
                        # also or'ed `ppath[0:1]=="/" and urlHostUrlisNo is
                        # not None`, but the match is always None inside
                        # this branch, so that disjunct was dead — removed.)
                        if url[0:1] == "/":
                                realParentPath = ""
                        else:
                                realParentPath = urllib.request.pathname2url(os.path.normpath(os.path.join(ppath, parentJoin)))

                        # normpath collapses the root to "/" (or "\\" on
                        # Windows); treat that as "no parent" so the host
                        # prefix is not followed by a doubled slash.
                        if realParentPath == "/" or realParentPath == "\\":
                                realParentPath = ""

                        loadurl = urlParse(currentObj.get("host") + realParentPath + realPath)
                else:
                        loadurl = urlParse(currentObj.get("host") + realPath)
        else:
                loadurl = urlParse(url)
        return loadurl
    
def checkUrlVaild(url: str) -> bool:
        '''
        Return True if *url* can be fetched, False otherwise.
        @url the link address to probe
        Malformed URLs (e.g. missing scheme) also yield False instead of
        raising: Request() itself raises ValueError for them, so it now
        sits inside the try block (the original let that escape).
        '''
        # Browser-like headers: some sites refuse urllib's default agent.
        header = {
                'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
        }
        try:
                request = urllib.request.Request(url, headers=header)
                # Close the response instead of leaking the connection.
                with urllib.request.urlopen(request):
                        return True
        except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit still propagate; any fetch failure == invalid.
                return False

def filterListDict(d: list[dict]) -> list[dict]:
        '''
        Remove duplicate dicts from *d*, keeping the first occurrence of
        each and preserving the original order.  Two dicts are considered
        equal only when their items appear in the same insertion order,
        and all values must be hashable.
        '''
        seen = set()
        unique = []
        for item in d:
                key = tuple(item.items())
                if key in seen:
                        continue
                seen.add(key)
                unique.append(item)
        return unique
       
        
    