import requests
import os
import urllib
from urllib import parse,request
from urllib.parse import unquote
import re
from xxqg.db import *
import traceback
import hashlib
import shutil

# Module-level handle to the URL queue/database (from xxqg.db) shared by all
# helpers below; addurl(...) presumably enqueues a URL for download -- TODO confirm.
db = urls()

def gethtml(url):
    """Fetch *url* and return its body decoded as UTF-8.

    Returns '' for a None url, when the response body is empty, or on any
    request/decoding failure (fetching is best-effort).  The literal query
    fragment '&copy=1' is stripped from the returned HTML.
    """
    if url is None:
        return ''
    headers = {
        # NOTE(review): the UA value double-prefixes "User-Agent:"; kept verbatim
        # in case the server keys on the exact string -- confirm before changing.
        "User-Agent": "User-Agent:Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)"}
    try:
        r = requests.get(url, headers=headers, timeout=30)
        body = r.content.strip()
        # BUG FIX: r.content is bytes, so the original comparisons against the
        # str '' (and == None) could never be true; test emptiness directly.
        if not body:
            return ''
        htmlcontent = body.decode('UTF-8')
        return htmlcontent.replace('&copy=1', '')
    except Exception:
        # Best-effort: any network or decoding error yields ''.
        return ''

def geturlandsavepath(pageurl, url):
    """Resolve *url* against *pageurl* and derive its local save path.

    Returns (fullurl, savepath).  savepath is '' for hosts that are not
    mirrored locally; mirrored xuexi.cn hosts map under 'xxqg/<subdomain>'.
    """
    absolute = parse.urljoin(pageurl, unquote(url, 'utf-8').strip())
    absolute = absolute.strip('/').strip()

    parts = parse.urlparse(absolute)
    savepath = parts.path
    if '.' not in parts.path:
        # Extensionless paths are treated as directories holding an index page.
        savepath += '/index.html'

    mirrored_hosts = ('www.xuexi.cn', 'source.xuexi.cn', 'bootcdn.xuexi.cn',
                      'video.xuexi.cn', 'preview-pdf.xuexi.cn')
    if parts.netloc in mirrored_hosts:
        # Drop query/fragment; save under xxqg/<subdomain>/<path>.
        absolute = '{}://{}{}'.format(parts.scheme, parts.netloc, parts.path)
        return absolute, 'xxqg/' + parts.netloc.replace('.xuexi.cn', '') + savepath

    if parts.netloc == 'xxqg-pc-pdf.oss-cn-beijing-zxb-d01-a.ops.xxqg.cn':
        # PDF OSS host is mirrored under the preview-pdf tree.
        absolute = '{}://{}{}'.format(parts.scheme, parts.netloc, parts.path)
        return absolute, 'xxqg/preview-pdf' + savepath

    return absolute, ''

def checkurl(url):
    """Return True when *url* looks worth crawling.

    Accepts scheme-less strings that contain a dot (relative paths with an
    extension) and any URL on a *.xuexi.cn host; rejects None/blank input.
    """
    if url is None or not url.strip():
        return False
    scheme_less = 'http' not in url
    has_dot = '.' in url
    return (scheme_less and has_dot) or '.xuexi.cn' in url

def savehtml(filepath, html):
    """Write *html* to *filepath* as UTF-8, creating parent dirs as needed.

    The literal '&copy=1' fragment is stripped first.  Errors are printed
    (path plus traceback) but never raised -- saving is best-effort.
    """
    try:
        html = html.replace('&copy=1', '')
        parent = os.path.dirname(filepath)
        # Guard: dirname is '' for a bare filename, which makedirs rejects.
        if parent:
            # exist_ok avoids the racy exists()-then-makedirs pattern.
            os.makedirs(parent, exist_ok=True)
        with open(filepath, 'w', encoding="utf-8") as file:
            file.write(html)
    except Exception:
        print(filepath)
        traceback.print_exc()

def downcssdata(model):
    """Rewrite image URLs inside a saved CSS file and queue each image for download.

    *model* is a crawl record (presumably a row object from xxqg.db) providing
    .savepath (local file path), .fromurl, .url and .deepth -- TODO confirm.
    Replaced links point at the local mirror host http://zt.qjw.jw/.
    """
    links1 = {}  # fullurl -> local save path
    links2 = {}  # fullurl -> matched string exactly as it appears in the file

    with open(model.savepath, 'r', encoding='utf-8') as f:
        content = f.read()
        links1.clear()
        links2.clear()
        # NOTE(review): 'png' is listed twice -- harmless (dict keys dedupe) but redundant.
        exts = ['jpg', 'png', 'gif', 'jpeg', 'png', 'bmp']
        for ext in exts:
            # Matches runs of chars (excluding quotes/parens/braces/angle brackets
            # and '|') ending in .<ext>.  The '|' separators inside the character
            # class are literal -- presumably the author meant alternation; verify
            # before tightening.
            for url in re.findall(r'[^"|''|\)|\(|\{|\}|\<|\>]+?\.'+ext,content):
                fullurl,savepath = geturlandsavepath(model.fromurl,url)
                links1[fullurl] = savepath
                links2[fullurl] = url
        for fullurl in links1:
            savepath = links1[fullurl]
            oriurl = links2[fullurl]
            # Queue the asset (filetype=2 = resource), then point the CSS at the mirror.
            db.addurl(fullurl,savepath=savepath,fromurl=model.url,filetype=2,filestatus=0,deepth=model.deepth+1)
            content = content.replace(oriurl,'http://zt.qjw.jw/'+savepath)
    # Overwrite the CSS file with the rewritten content.
    with open(model.savepath,'w',encoding='utf-8') as f:
        f.write(content)

def getpdfsavepath(url):
    """Map a (possibly doubled) PDF URL to its local .zip save path."""
    # Some crawled links embed one https URL inside another; keep the inner one.
    collapsed = re.sub(r'https:.*?https', 'https', url)
    parts = parse.urlparse(unquote(collapsed, 'utf-8'))
    # PDFs are stored as a zip of page images under the preview-pdf mirror tree.
    return 'xxqg/preview-pdf' + parts.path.replace('.pdf', '.zip')

def downjsdata(model):
    """Rewrite resource links inside a saved .js file and queue them for download.

    .css files are dispatched to downcssdata; anything that is neither .js nor
    .css is ignored.  *model* is a crawl record (presumably from xxqg.db)
    providing .savepath, .fromurl, .url and .deepth -- TODO confirm.  Three
    passes run in order (assets, PDFs, HTML pages); the replace order matters
    because each pass mutates *content* in place.
    """
    links1 = {}   # fullurl -> local save path
    links2 = {}   # fullurl -> matched string exactly as it appears in the file
    ext = os.path.splitext(model.savepath)[-1]
    if ext == '.css':
        downcssdata(model)
        return
    if ext != '.js':
        return
    content = ''
    with open(model.savepath, 'r', encoding='utf-8') as f:
        content = f.read()
        links1.clear()
        links2.clear()
        # Pass 1: generic assets (filetype=2).  NOTE(review): 'png' appears twice.
        exts = ['jpg', 'png', 'gif', 'jpeg', 'png', 'bmp', 'mp4', 'mp3', 'js','css','json']
        for ext in exts:
            # Captures an https URL ending in .<ext>; the trailing [^a-zA-Z]+
            # requires a non-letter delimiter so e.g. '.jsx' is not matched as '.js'.
            for url in re.findall(r'(https://[^"|''|\)|\(|\{|\}|\<|\>]+?\.'+ext+')[^a-zA-Z]+',content):
                fullurl,savepath = geturlandsavepath(model.fromurl,url)
                links1[fullurl] = savepath
                links2[fullurl] = url
        for fullurl in links1:
            savepath = links1[fullurl]
            oriurl = links2[fullurl]
            db.addurl(fullurl,savepath=savepath,fromurl=model.url,filetype=2,filestatus=0,deepth=model.deepth+1)
            content = content.replace(oriurl,'http://zt.qjw.jw/'+savepath)

        links1.clear()
        links2.clear()
        # Pass 2: PDFs take the special zip-of-page-images save path (see
        # getpdfsavepath / downfile) instead of geturlandsavepath.
        exts = ['pdf']
        for ext in exts:
            for url in re.findall(r'(https://[^"|''|\)|\(|\{|\}|\<|\>]+?\.'+ext+')[^a-zA-Z]+',content):
                fullurl = url
                savepath = getpdfsavepath(fullurl)
                links1[fullurl] = savepath
                links2[fullurl] = url
        for fullurl in links1:
            savepath = links1[fullurl]
            oriurl = links2[fullurl]
            db.addurl(fullurl,savepath=savepath,fromurl=model.url,filetype=2,filestatus=0,deepth=model.deepth+1)
            content = content.replace(oriurl,'http://zt.qjw.jw/'+savepath)

        links1.clear()
        links2.clear()
        # Pass 3: linked HTML pages are queued as crawlable pages (filetype=1).
        exts = ['html']
        for ext in exts:
            for url in re.findall(r'(https://[^"|''|\)|\(|\{|\}|\<|\>]+?\.'+ext+')[^a-zA-Z]+',content):
                fullurl,savepath = geturlandsavepath(model.fromurl,url)
                links1[fullurl] = savepath
                links2[fullurl] = url
        for fullurl in links1:
            savepath = links1[fullurl]
            oriurl = links2[fullurl]
            db.addurl(fullurl,savepath=savepath,fromurl=model.url,filetype=1,filestatus=0,deepth=model.deepth+1)
            content = content.replace(oriurl,'http://zt.qjw.jw/'+savepath)

    # Overwrite the .js file with the mirror-rewritten content.
    with open(model.savepath,'w',encoding='utf-8') as f:
        f.write(content)


def downres(pageurl, html, deepth):
    """Queue every resource and page link found in *html* for download and
    rewrite those links to point at the local mirror host.

    Returns the rewritten html string.  NOTE(review): *deepth* is accepted but
    never used -- queued entries always get deepth=0; confirm that is intended.
    (Removed from the original: an unused new_urls set, a duplicate 'png'
    extension that caused a redundant scan, and a clear() of a just-created dict.)
    """
    links1 = {}   # fullurl -> local save path
    links2 = {}   # fullurl -> matched string exactly as it appears in the html

    # Pass 1: static assets (filetype=2).
    resexts = ['jpg', 'png', 'gif', 'jpeg', 'bmp', 'pdf', 'mp4', 'mp3', 'js', 'css']
    for ext in resexts:
        # Captures a quote/paren/brace-free run ending in .<ext>; the trailing
        # [^a-zA-Z]+ requires a non-letter delimiter after the extension.
        for url in re.findall(r'([^"|''|\)|\(|\{|\}|\<|\>]+?\.'+ext+')[^a-zA-Z]+',html):
            fullurl, savepath = geturlandsavepath(pageurl, url)
            links1[fullurl] = savepath
            links2[fullurl] = url
    for fullurl, savepath in links1.items():
        db.addurl(fullurl,savepath=savepath,fromurl=pageurl,filetype=2,filestatus=0,deepth=0)
        html = html.replace(links2[fullurl],'http://zt.qjw.jw/'+savepath)

    # Pass 2: linked HTML pages are queued as crawlable pages (filetype=1).
    links1.clear()
    links2.clear()
    for ext in ['html']:
        for url in re.findall(r'([^"|''|\)|\(|\{|\}|\<|\>]+?\.'+ext+')[^a-zA-Z]+',html):
            fullurl, savepath = geturlandsavepath(pageurl, url)
            links1[fullurl] = savepath
            links2[fullurl] = url
    for fullurl, savepath in links1.items():
        db.addurl(fullurl,savepath=savepath,fromurl=pageurl,filetype=1,filestatus=0,deepth=0)
        html = html.replace(links2[fullurl],'http://zt.qjw.jw/'+savepath)
    return html

def report(count, blockSize, totalSize):
    """urlretrieve reporthook: print in-place download progress.

    count: blocks transferred so far; blockSize: block size in bytes;
    totalSize: total file size (urlretrieve passes -1 or 0 when unknown).
    """
    if totalSize <= 0:
        # BUG FIX: unknown size crashed with ZeroDivisionError (or printed
        # nonsense for -1); a percentage is meaningless here, so stay silent.
        return
    # Clamp: the final block usually overshoots totalSize slightly.
    percent = min(int(count * blockSize * 100 / totalSize), 100)
    print("\r%d%%" % percent + ' complete', end=' ')

def downfile(src, local):
    """Download *src* to the local path *local* (best-effort; errors printed).

    PDFs get special handling: the preview service exposes each page as a JPG
    at <src>/doc/I/<n>, so pages are fetched sequentially until 5 consecutive
    misses, zipped into *local* (a .zip path per getpdfsavepath), and the
    temporary image directory is removed.  Everything else goes through
    urllib.request.urlretrieve with the report() progress hook.
    """
    # NOTE(review): headers is built but never passed to requests.get or
    # urlretrieve below -- presumably an oversight; confirm before wiring it in.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36",
        #":authority": "www.xuexi.cn",
        #":method": "GET",
        #":path": filepath,
        #":scheme": parseurl.scheme,
        "upgrade-insecure-requests":"1",
        "accept-language":"zh-CN,zh;q=0.9",
        "accept-encoding":"gzip, deflate, br",
        "referer":"https://www.xuexi.cn/"
    }
    try:
        dir = os.path.dirname(local)
        if os.path.isdir(dir) == False:
            os.makedirs(dir)
        # Remove any stale copy so a partial previous download is not kept.
        if os.path.isfile(local) == True:
            os.unlink(local)
            pass

        srcext = os.path.splitext(src)[-1]
        if srcext == '.pdf':
            # Collapse a doubled URL (inner https URL embedded in an outer one).
            src = re.sub(r'https.*?https',r'https',src)
            src = unquote(src,'utf-8')

            errortime = 0   # consecutive failed page fetches
            index = 0       # page number being fetched
            # Temp dir named after the zip (without extension) to hold page JPGs.
            filename = os.path.split(local)[-1].replace('.zip','')
            imgdir = os.path.join(dir,filename)
            if os.path.isdir(imgdir) == False:
                os.makedirs(imgdir)
            # Stop after 5 consecutive non-200 responses (past the last page).
            while errortime<5:
                imgurl = '{}/doc/I/{}'.format(src,index)
                imgpath = os.path.join(imgdir,'{}.jpg'.format(index))
                # The OSS host is not reachable directly; fetch via the public mirror.
                imgurl = imgurl.replace('xxqg-pc-pdf.oss-cn-beijing-zxb-d01-a.ops.xxqg.cn','preview-pdf.xuexi.cn')
                r = requests.get(imgurl, timeout=30)
                if os.path.isfile(imgpath) == True:
                    os.unlink(imgpath)
                if r.status_code==200:
                    with open(imgpath, 'wb') as f:
                        f.write(r.content)
                        f.close()
                    errortime = 0
                else:
                    errortime = errortime+1
                index = index + 1
            # Zip the page images into <imgdir>.zip, then drop the temp dir.
            shutil.make_archive(imgdir,'zip',root_dir=imgdir)
            shutil.rmtree(imgdir)
        else:
            #print('downloading: {}'.format(src))
            urllib.request.urlretrieve(src,local,reporthook=report)
            #print('')
    except:
        print(src)
        pass
        traceback.print_exc()


