import requests
import chardet
import os
import re
from urllib import parse,request
from urllib.parse import unquote
import re
from pyquery import PyQuery as pq
from zgjw_zt.db import *
from contextlib import closing
import subprocess


# Shared url-queue handle from zgjw_zt.db (star-imported above); presumably
# wraps the crawl-url table used by db.addurl() below — confirm in db module.
db = urls()

def gethtml(url):
    """Fetch *url* and return its HTML decoded to text.

    Returns '' for a None url, an empty response body, or any
    network/decoding error (best-effort crawler semantics). The returned
    markup has ':80/' port artifacts and IE conditional comments removed.
    """
    if url is None:
        return ''
    try:
        headers = {
            "User-Agent": "User-Agent:Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)"}
        r = requests.get(url, headers=headers)
        htmlcontent = r.content.strip()
        # `htmlcontent` is bytes here; the original compared it against the
        # str '' (never true) — truthiness handles the empty case correctly.
        if not htmlcontent:
            return ''
        # Pages on this site mix encodings, so detect from the raw bytes.
        char = chardet.detect(htmlcontent)
        htmlcontent = htmlcontent.decode(char['encoding'])
        htmlcontent = htmlcontent.replace(':80/', '/')
        # Drop IE conditional comments, which confuse later PyQuery parsing.
        htmlcontent = re.sub(r'<!--\[if lt[\s\S]*\[endif\]-->', '', htmlcontent)
        return htmlcontent
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        return ''

def getdirpre(savepath):
    """Return a '../' prefix that climbs from *savepath* back to the root.

    One '../' is emitted per '/' separator in the path:
    'a/b/c.html' -> '../../', 'index.html' -> ''.
    """
    # len(split('/')) - 1 == count('/'); string repetition replaces the
    # original append-and-join loop.
    return '../' * savepath.count('/')

def geturlandsavepath(pageurl, url):
    """Resolve *url* against *pageurl* and map it to a local save path.

    Returns (fullurl, savepath). For hosts other than the mirrored 81.cn
    subdomains savepath is '' so the caller can skip the resource.
    Extension-less paths are treated as directory pages (index.html).
    """
    cleaned = unquote(url, 'utf-8').strip().replace(':80', '')
    fullurl = parse.urljoin(pageurl, cleaned).strip('/').strip()

    parts = parse.urlparse(fullurl)
    savepath = parts.path
    if '.' not in parts.path:
        savepath += '/index.html'

    # Only these three hosts are mirrored locally.
    if parts.netloc not in ('www.81.cn', 'tv.81.cn', 'photo.81.cn'):
        return fullurl, ''

    fullurl = '{}://{}{}'.format(parts.scheme, parts.netloc, parts.path)
    savepath = 'xxqj/' + parts.netloc.replace('.81.cn', '') + savepath
    return fullurl, savepath


def checkurl(url):
    """Decide whether *url* should be mirrored.

    Accepts relative urls containing a dot (likely a filename) and any
    url on the photo/tv 81.cn hosts; None, blank, and other absolute
    urls are rejected.
    """
    if url is None or not url.strip():
        return False
    looks_relative = 'http' not in url and '.' in url
    return looks_relative or 'photo.81.cn' in url or 'tv.81.cn' in url


def savehtml(filepath, html):
    """Inject the FrontPage nav/copyright scripts into *html* and write it.

    Creates the target directory if needed; output is written as UTF-8.
    """
    dirname = os.path.dirname(filepath)
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() test (and stops shadowing the `dir` builtin).
    os.makedirs(dirname, exist_ok=True)
    doc = pq(html)
    # FrontPage.js renders the shared navigation and footer client-side.
    doc('head').append('<script src="http://img.qjw.jw/main/js/FrontPage.js" ></script>')
    doc('body').before('<script type="text/javascript">FrontPage("nav",document.charset)</script>')
    doc('body').append('<script type="text/javascript">FrontPage("copyright",document.charset)</script>')
    html = doc.outer_html()
    html = html.replace('&#13;', '')
    # Swap the site's sidebar for the mirrored one served from zt.qjw.jw.
    html = html.replace('<div class="sidebar hidden-xs hidden-sm"></div>', '<div class="xxqj_sidebar hidden-xs hidden-sm"><script src="http://zt.qjw.jw/xxqj/sider.js"></script></div>')
    # `with` closes the file; the original's explicit file.close() inside
    # the with-block was redundant and has been removed.
    with open(filepath, 'w', encoding="utf-8") as file:
        file.write(html)


def downres(pageurl, html):
    """Rewrite resource references in *html* to local paths and queue them.

    Processes <link href>, <img src> and <script src> tags: urls that fail
    checkurl() or that map to no local savepath are removed from the
    document; the rest are rewritten to '/<savepath>' and registered in the
    url db for later download. Returns the rewritten html.

    Dead code removed from the original: an unused `new_urls` set, an
    unused `content` variable, and three `href = unquote(...)` assignments
    whose results were never read.
    """
    doc = pq(html)

    for link in doc('link').items():
        href = link.attr.href
        if not checkurl(href):
            link.remove()
            continue
        fullurl, savepath = geturlandsavepath(pageurl, href)
        if savepath == '':
            link.remove()
            continue
        # Only stylesheets are mirrored; other <link> targets keep their url.
        if os.path.splitext(savepath)[-1] != '.css':
            continue
        link.attr.href = '/' + savepath
        db.addurl(url=fullurl, savepath=savepath, fromurl=pageurl, filetype=2, filestatus=0)

    for img in doc('img').items():
        src = img.attr.src
        if not checkurl(src):
            img.remove()
            continue
        fullurl, savepath = geturlandsavepath(pageurl, src)
        if savepath == '':
            img.remove()
            continue
        img.attr.src = '/' + savepath
        db.addurl(url=fullurl, savepath=savepath, fromurl=pageurl, filetype=2, filestatus=0)

    for script in doc('script').items():
        src = script.attr.src
        # Inline scripts (no src, or one too short to be a file) stay as-is.
        if src is None or len(src) < 5:
            continue
        if not checkurl(src):
            script.remove()
            continue
        fullurl, savepath = geturlandsavepath(pageurl, src)
        if savepath == '':
            script.remove()
            continue
        script.attr.src = '/' + savepath
        db.addurl(url=fullurl, savepath=savepath, fromurl=pageurl, filetype=2, filestatus=0)

    return doc.outer_html()


def downfile(srcUrl, localFile):
    """Download *srcUrl* to *localFile*, streaming in 1 KiB chunks.

    If a local copy at least as large as the remote body already exists
    the download is skipped; a smaller partial file is deleted and
    re-fetched from scratch (no byte-range resume). Progress is printed
    in place on one line.

    NOTE(review): raises KeyError if the server omits Content-Length —
    unchanged from the original; confirm all mirrored hosts send it.
    """
    headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
    with closing(requests.get(srcUrl, headers=headers, stream=True)) as response:
        chunk_size = 1024  # bytes fetched per iteration
        content_size = int(response.headers['content-length'])  # total body size

        if os.path.isfile(localFile):
            oldsize = int(os.path.getsize(localFile))
            if oldsize < content_size:
                # Partial download left over: remove it and start over.
                os.unlink(localFile)
            else:
                # Already complete — nothing to do.
                return
        dirname = os.path.dirname(localFile)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        filename = os.path.basename(localFile)

        data_count = 0
        with open(localFile, "wb") as file:
            for data in response.iter_content(chunk_size=chunk_size):
                file.write(data)
                data_count += len(data)
                now_jd = (data_count / content_size) * 100
                print("\r文件下载进度：%d%%(%.2f/%.2f MB) - %s" % (now_jd, data_count/1024/1024, content_size/1024/1024, filename), end=" ")
        print('\r')

def down_m3u8(id,filepath):
    """Download an 81.cn video by *id* as an m3u8 stream and remux it to MP4.

    Resolves the m3u8 playlist url via the yspmvms.81.cn JSONP endpoint,
    appends every .ts segment into *filepath*, then shells out to a
    hard-coded local ffmpeg.exe to remux the concatenated stream to MP4,
    finally replacing *filepath* with the converted file.

    NOTE(review): `id` shadows the builtin; the ffmpeg path and the JSONP
    callback/timestamp query parameters are hard-coded — confirm they are
    still valid against the live service.
    """
    headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36","Host":"yspmvms.81.cn"}
    newurl = 'http://yspmvms.81.cn/?id={}&callbackparam=jQuery111305642970661431972_1546766302660&ctype=sd&ttype=pc&_=1546766302661'.format(id)
    text = requests.get(newurl,headers=headers).text
    # First http...m3u8 url embedded in the JSONP response body.
    m3u8_url = re.findall(r'(http:.*m3u8)',text)[0]
    filename = os.path.basename(m3u8_url)
    headers = {'X-Requested-With':'ShockwaveFlash/27.9.9.999',"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
    text = requests.get(m3u8_url,headers=headers).text
    # Segment names look like '<playlist-basename>_NNN.ts' inside the playlist.
    matches = re.findall(r'{}_\d+\.ts'.format(filename),text,re.M)
    total = len(matches)
    count = 0
    dir = os.path.dirname(filepath)
    if os.path.isdir(dir) == False:
        os.makedirs(dir)
    for match in matches:
        # Stripping the basename from the match leaves '_NNN.ts', which is
        # appended to the full playlist url to form the segment url.
        realurl = '{}{}'.format(m3u8_url,match.replace(filename,''))
        header = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"}
        response = requests.get(realurl, timeout=120, headers=header)
        # 'ab+' so successive segments concatenate into one transport stream.
        with open(filepath, mode='ab+') as f:
            f.write(response.content)
        count = count+1
        print("\r正在下载视频文件%s,共有%d个切片，已下载%d个" % (filename,total, count),
              end=" ")
    newpath = filepath+'.mp4'
    print('\r开始转换m3u8文件')
    # Stream-copy remux (no re-encode); aac_adtstoasc fixes the AAC bitstream
    # for the MP4 container.
    cmd_str = 'D:\\soft\\ffmpeg\\bin\\ffmpeg.exe -v quiet -i \"' + filepath + '\" ' + '-acodec copy -vcodec copy -absf aac_adtstoasc \"' + newpath + '\" '
    subprocess.call(cmd_str, shell=True)
    # Replace the raw .ts concatenation with the remuxed MP4 under the
    # original path.
    os.unlink(filepath)
    os.rename(newpath, filepath)
    print('转换完成')
