from bs4 import BeautifulSoup
import urllib.request
import urllib.parse
import bs4PaseLibs
import re
import os
import pathlib
import json
from urllib.request import urlretrieve
import urllib3
import time
from tqdm import tqdm
import chardet
import tools
class HTMLBYLIBS:
    '''
    Site mirroring downloader (tmzdy's site parser/downloader).

    Fetches a page, extracts its img/css/js/inline-style asset links (via
    pluggable hook functions or the built-in ``bs4PaseLibs`` helpers) and
    downloads every asset into a local ``html`` directory tree.
    '''

    def __init__(self, url="", cssLinksFun=None, jsLinksFun=None, imgLinksFun=None, styleContent=None) -> None:
        '''
        Parser configuration.

        @url: target URL to parse
        @cssLinksFun: optional custom parser for css <link> files
        @jsLinksFun: optional custom parser for js <script> files
        @imgLinksFun: optional custom parser for <img> links
        @styleContent: optional custom parser for inline <style> content
        Each hook is called as ``hook(soup, url)`` and must return a list of
        link dicts compatible with the built-in ``bs4PaseLibs`` parsers.
        '''
        self.url = url
        parts = urllib.parse.urlparse(self.url)
        # hostname is None when the URL has no network location; fall back
        # to "" so string concatenation cannot raise TypeError.
        self.host = parts.scheme + "://" + (parts.hostname or "")
        norm_path = os.path.normpath(parts.path)
        self.fileName = os.path.basename(norm_path)
        suffix = pathlib.Path(norm_path).suffix
        self.outMk = os.path.normpath(os.path.join(os.getcwd(), "html"))
        # makedirs(exist_ok=True) avoids the check-then-create race of the
        # original exists()/mkdir() pair.
        os.makedirs(self.outMk, exist_ok=True)
        if self.fileName in ("", "/"):
            self.fileName = "index.html"
        elif suffix == "":
            # an extension-less path component is saved as an .html page
            self.fileName = self.fileName + ".html"
        self.__cssLinksFun = cssLinksFun
        self.__jsLinksFun = jsLinksFun
        self.__imgLinksFun = imgLinksFun
        self.__styleContent = styleContent

    def __getUrlContent(self, keyword="") -> "BeautifulSoup":
        '''
        Download ``self.url`` (plus the URL-encoded *keyword* suffix), parse
        it with BeautifulSoup, cache the soup on ``self.html`` and save the
        raw HTML into the output directory. Returns the parsed soup.
        '''
        # quote() canonically lives in urllib.parse (urllib.request merely
        # re-exports it) -- encode the keyword before appending it.
        url_all = self.url + urllib.parse.quote(keyword)
        header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
        }  # request headers (browser-like UA to avoid trivial bot blocks)
        request = urllib.request.Request(url_all, headers=header)
        # "with" closes the HTTP response even if read() raises
        with urllib.request.urlopen(request) as response:
            body = response.read()
        print("获取内容成功正在解析...")
        self.html = BeautifulSoup(body, 'lxml')
        self.__saveHtml(c=str(self.html).encode())
        return self.html

    def __saveHtml(self, fileName: str = "", c: bytes = b"") -> bool:
        '''
        Write the raw bytes *c* to *fileName* (defaults to the page's own
        file name inside the output directory).

        Returns True on success, False when *c* is empty.
        '''
        if not c:
            print("参数有误")
            return False
        fname = fileName or os.path.normpath(self.outMk + "/" + self.fileName)
        # context manager guarantees the handle is closed on write errors too
        with open(fname, "wb") as fh:
            fh.write(c)
        return True

    # The four parsers below share one pattern: use the user-supplied hook
    # when configured, otherwise fall back to the built-in bs4PaseLibs helper.
    def __parseImg(self):
        '''Image link parser (handles <img> and background images).'''
        if self.__imgLinksFun is not None:
            return self.__imgLinksFun(self.html, self.url)
        return bs4PaseLibs.pageBodyLinksImg(self.html, self.url)

    def __parseCssFile(self):
        '''CSS <link> file parser.'''
        if self.__cssLinksFun is not None:
            return self.__cssLinksFun(self.html, self.url)
        return bs4PaseLibs.pageBodyLinksCss(self.html, self.url)

    def __parseJsFile(self):
        '''JS <script> file parser.'''
        if self.__jsLinksFun is not None:
            return self.__jsLinksFun(self.html, self.url)
        return bs4PaseLibs.pageBodyLinksJs(self.html, self.url)

    def __parseStyle(self):
        '''Inline <style> content parser.'''
        if self.__styleContent is not None:
            return self.__styleContent(self.html, self.url)
        return bs4PaseLibs.pageBOdyStyleContent(self.html, self.url)

    def __saveLoadList(self):
        '''Persist the pending download list to loadlist.json; False if empty.'''
        if not self.loadList:
            print("error:没有可保存的下载列表")
            return False
        with open('loadlist.json', 'w') as f:
            json.dump(self.loadList, f)
        return True

    def __downloadAll(self):
        '''
        Download every entry of ``self.loadList`` into the output tree.

        Each element is updated in place: ``newpath`` gets the local file
        path, ``isload`` the success flag of its download attempt. Files
        that already exist locally are skipped (not re-downloaded).
        '''
        total = len(self.loadList)
        # One progress-bar step per file. The original fed a per-file
        # percentage into pbar.update(), which *accumulates* on every
        # reporthook call and immediately overflowed the total=100 bar.
        pbar = tqdm(total=total)
        for index, el in enumerate(self.loadList, start=1):
            parent = el['parentpath']
            if parent in ("/", "\\"):
                dirp = self.outMk
            else:
                # deliberate concatenation: parentpath starts with a
                # separator, so os.path.join() would treat it as absolute
                # and silently drop outMk
                dirp = os.path.normpath(self.outMk + parent)
            os.makedirs(dirp, exist_ok=True)
            loadfile = os.path.join(dirp, el["filename"])
            if not os.path.exists(loadfile):
                try:
                    time.sleep(0.5)  # throttle: be polite to the remote host
                    urlretrieve(el["url"], loadfile)
                # narrow except (the original bare "except:" also swallowed
                # KeyboardInterrupt/SystemExit)
                except Exception:
                    el["isload"] = False
                    pbar.set_description_str('[' + str(index) + '/' + str(total) + '][error]-->' + el["filename"])
                else:
                    el["isload"] = True
                    pbar.set_description_str('[' + str(index) + '/' + str(total) + ']-->' + el["filename"])
            el["newpath"] = loadfile
            pbar.update(1)
        pbar.close()

    def parse(self):
        '''
        Entry point: fetch the page, collect all asset links (img, css, js,
        inline style), de-duplicate them, persist the list and download
        everything locally.

        Returns the download list (list of dicts with ``isload``/``newpath``
        fields) on success, None on failure.
        '''
        try:
            self.__getUrlContent()
            links = (self.__parseImg() + self.__parseCssFile()
                     + self.__parseJsFile() + self.__parseStyle())
            print("成功解析整站资源路径，正在开始下载资源到本地...")
            # drop duplicate asset paths so nothing is downloaded twice
            dt = []
            for el in tools.filterListDict(links):
                el['isload'] = False
                el['newpath'] = ""
                dt.append(el)
            self.loadList = dt
            self.__saveLoadList()
            self.__downloadAll()
            return dt
        except Exception as exc:
            # report the actual failure instead of a bare generic message
            print("程序错误", exc)
                
         
                




        






