import requests, chardet, re, os, threading, subprocess
from contextlib import closing
from urllib import parse
from urllib.parse import unquote
from pyquery import PyQuery as pq
from peewee import *
from peaktask.setting import *

threadLock = threading.Lock()  # serializes all Urls work-queue mutations across worker threads


def getdb():
    """Return the peewee database handle selected by DBTYPE in peaktask.setting.

    Returns:
        A MySQLDatabase or SqliteDatabase instance.

    Raises:
        ValueError: if DBTYPE is neither 'mysql' nor 'sqlite'. (The original
        silently returned None here, which only failed later at query time
        with a confusing error.)
    """
    if DBTYPE == 'mysql':
        return MySQLDatabase(DBNAME, host=HOST, user=USER, passwd=PASSWD, port=PORT)
    elif DBTYPE == 'sqlite':
        return SqliteDatabase(SQLITEDBNAME)
    raise ValueError('unsupported DBTYPE: {!r}'.format(DBTYPE))

datadb = getdb()

class BaseModel(Model):
    """Base peewee model that binds every subclass to the shared database handle."""
    class Meta:
        database = datadb
class Urls(BaseModel):
    """Work-queue table of URLs to crawl.

    All mutating helpers below are invoked as ``Urls.method(...)`` (note: no
    ``self``/``cls`` parameter — callers never use instances) and serialize on
    the module-level ``threadLock`` so multiple worker threads can share the
    queue safely.
    """
    id = PrimaryKeyField()
    url = CharField(null = False)
    fromurl = CharField(default = '')      # page the URL was discovered on
    deepth = IntegerField(default = 0)     # crawl depth from the index page
    savepath = CharField(default = '')     # relative path under the spider's datadir
    # filetype: 1 = html page, 2 = page resource, 3 = audio/video, 4 = other, 5 = list page
    filetype = IntegerField(default = 0)
    # filestatus: 0 = pending, 5 = claimed by a worker, 1 = done, 10 = failed
    filestatus = IntegerField(default = 0)
    fileflag = IntegerField(default = 0)
    ztname = CharField(default='')         # topic ("zhuanti") name

    class Meta:
        db_table = 'zgjw_urls'
        # NOTE(review): a Meta `order_by` string is a peewee 2 idiom — confirm
        # it is honored by the peewee version in use.
        order_by = 'id'

    def delurl(url, **args):
        """Delete the row matching (url, args['savepath']), if one exists."""
        # `with` guarantees the lock is released even if the query raises
        # (the original acquire/release pair leaked the lock on exceptions).
        with threadLock:
            model = Urls.get_or_none(Urls.url == url, Urls.savepath == args['savepath'])
            if model is not None:
                model.delete_instance()

    def addurl(url, **args):
        """Insert (url, args['savepath']) into the queue unless already present."""
        with threadLock:
            if url:
                if Urls.get_or_none(Urls.url == url, Urls.savepath == args['savepath']) is None:
                    Urls.create(url=url, **args)

    def popurl(type=0):
        """Claim one pending row (optionally restricted to *type*).

        Marks the claimed row filestatus=5 and returns ``(model_or_None,
        number_of_rows_still_pending)``.
        """
        with threadLock:
            if type == 0:
                model = Urls.get_or_none(Urls.filestatus == 0)
            else:
                # BUG FIX: the original used the Python `and` operator, which
                # evaluates to its right operand only, so the filestatus
                # condition was silently dropped and already-claimed/finished
                # rows could be handed out again. peewee expressions must be
                # combined with `&`.
                model = Urls.get_or_none((Urls.filestatus == 0) & (Urls.filetype == type))
            left = 0
            if model is not None:
                model.filestatus = 5
                model.save()
                left = Urls.select().where(Urls.filestatus == 0).count()
            return model, left

    def setstatus(id, filestatus):
        """Set filestatus on the row with primary key *id*."""
        with threadLock:
            Urls.update(filestatus=filestatus).where(Urls.id == id).execute()

class ZTSpider(object):
    """Spider that mirrors 81.cn topic ("zhuanti") pages into local HTML files,
    rewriting links/resources to local paths and feeding the Urls work queue."""
    def __init__(self):
        self.datadir = ''    # local root directory every savepath is joined under
        self.ztlist = []     # extra whitelist of exact URLs accepted by checkurl()
        self.ztname = ''     # topic name stamped onto every queued Urls row
        self.allowhosts = ['www.81.cn','tv.81.cn','photo.81.cn']  # hosts allowed to be mirrored

    def gethtml(self,url):
        """Fetch *url* and return its decoded, lightly-cleaned HTML.

        Returns '' for a None url, an empty body, or any network/decoding
        failure (best-effort contract relied on by all callers).
        """
        if url is None:
            return ''
        try:
            headers = {
                "User-Agent": "User-Agent:Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)"}
            r = requests.get(url, headers=headers)
            htmlcontent = r.content.strip()
            # BUG FIX: the original compared the *bytes* body against the str ''
            # (always False) and against None (never true after .strip());
            # an empty body is now detected correctly.
            if not htmlcontent:
                return ''
            char = chardet.detect(htmlcontent)
            htmlcontent = htmlcontent.decode(char['encoding'])
            htmlcontent = htmlcontent.replace(':80/', '/')
            # strip IE conditional-comment blocks
            htmlcontent = re.sub(r'<!--\[if lt[\s\S]*\[endif\]-->', '', htmlcontent)
            return htmlcontent
        except Exception:
            # narrowed from a bare `except:`; still best-effort — callers treat
            # '' as "page unavailable"
            return ''

    def getdirpre(self,savepath):
        """Return a '../' prefix that climbs out of every directory level of
        *savepath* (one '../' per '/' in the path)."""
        # len(path.split('/')) - 1 is exactly the number of '/' characters
        return '../' * savepath.count('/')

    def geturlandsavepath(self,pageurl,url):
        """Resolve *url* against *pageurl* and map it to a local save path.

        Returns (fullurl, savepath); savepath is '' when the resolved host is
        not in self.allowhosts. Paths without a '.' are treated as directories
        and get '/index.html' appended.
        """
        cleaned = unquote(url, 'utf-8').strip().replace(':80', '')
        fullurl = parse.urljoin(pageurl, cleaned).strip('/').strip()

        parts = parse.urlparse(fullurl)
        savepath = parts.path
        if '.' not in parts.path:
            savepath = savepath + '/index.html'

        if parts.netloc not in self.allowhosts:
            return fullurl, ''
        # drop query/fragment and prefix the local mirror root, e.g.
        # www.81.cn -> zgjw/www/...
        fullurl = '{}://{}{}'.format(parts.scheme, parts.netloc, parts.path)
        savepath = 'zgjw/' + parts.netloc.replace('.81.cn', '') + savepath
        return fullurl, savepath

    def checkurl(self,url):
        """Decide whether *url* is worth following/downloading.

        Accepts: relative resource-looking urls (no scheme but a dot),
        content_/node_ pages on an allowed host, or exact entries of
        self.ztlist. Everything else is rejected.
        """
        if url is None or not url.strip():
            return False
        if 'http' not in url and '.' in url:
            return True
        if 'content_' in url or 'node_' in url:
            return any(host in url for host in self.allowhosts)
        return url in self.ztlist

    def savehtml(self,filepath,html):
        """Inject the FrontPage nav/copyright scripts into *html* and write it
        to datadir/filepath (creating directories, replacing any old file)."""
        filepath = os.path.join(self.datadir,filepath)

        # exist_ok=True replaces the racy `exists() == False` check; also avoids
        # shadowing the `dir` builtin
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        if os.path.isfile(filepath):
            os.unlink(filepath)

        doc = pq(html)
        doc('head').append('<script src="http://img.qjw.jw/main/js/FrontPage.js" ></script>')
        doc('body').before('<script type="text/javascript">FrontPage("nav",document.charset)</script>')
        doc('body').append('<script type="text/javascript">FrontPage("copyright",document.charset)</script>')
        html = doc.outer_html()

        with open(filepath, 'w', encoding="utf-8") as file:
            html = html.replace('&#13;', '')
            # swap the (emptied) sidebar container for the mirror's own sidebar widget
            html = html.replace('<div class="sidebar hidden-xs hidden-sm"></div>','<div class="xxqj_sidebar hidden-xs hidden-sm"><script src="http://zt.qjw.jw/xxqj/sider.js"></script></div>')
            file.write(html)
        # removed the redundant file.close(): the with-block already closes it

    def downres(self,pageurl, html, force=False):
        """Rewrite css/img/script references in *html* to local paths and queue
        each allowed asset into the Urls table (filetype=2).

        Args:
            pageurl: page the html came from; used to resolve relative urls.
            html:    page markup.
            force:   when True, delete any existing queue row first so the
                     asset is re-downloaded.

        Returns the rewritten html string.
        """
        doc = pq(html)
        # the three tag kinds share one loop body; only the attribute name and
        # two per-kind filters differ (see _queue_assets)
        self._queue_assets(doc('link').items(), 'href', pageurl, force, require_ext='.css')
        self._queue_assets(doc('img').items(), 'src', pageurl, force)
        self._queue_assets(doc('script').items(), 'src', pageurl, force, min_len=5)
        return doc.outer_html()

    def _queue_assets(self, elements, attrname, pageurl, force, require_ext=None, min_len=None):
        """Shared body of downres()'s link/img/script loops.

        min_len: script tags silently skip a missing/short src instead of
                 removing the element (matches the original per-tag behavior).
        require_ext: <link> tags only queue hrefs with this extension; other
                 extensions are left untouched.
        (The original loops also unquoted href into a variable that was never
        used afterwards; that dead statement is dropped here.)
        """
        for el in elements:
            href = el.attr(attrname)
            if min_len is not None:
                if href is None or len(href) < min_len:
                    continue
            if self.checkurl(href) == False:
                el.remove()
                continue
            fullurl, savepath = self.geturlandsavepath(pageurl, href)
            if savepath == '':
                el.remove()
                continue
            if require_ext is not None and os.path.splitext(savepath)[-1] != require_ext:
                continue
            el.attr(attrname, '/' + savepath)
            if force:
                Urls.delurl(url=fullurl, savepath=savepath)
            Urls.addurl(url=fullurl, savepath=savepath, fromurl=pageurl, filetype=2, filestatus=0, ztname = self.ztname)

    def downfile(self,srcUrl, localFile):
        """Stream-download *srcUrl* to datadir/localFile with a console progress
        line. Skips the download when a file of at least the advertised size
        already exists; restarts (no resume) when the local file is smaller."""
        localFile = os.path.join(self.datadir,localFile)
        headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
        with closing(requests.get(srcUrl,headers=headers,stream=True)) as response:
            chunk_size = 1024  # max bytes per chunk
            # BUG FIX: the original raised KeyError when the server sent no
            # Content-Length header; treat a missing header as size 0 (unknown)
            content_size = int(response.headers.get('content-length', 0))

            if os.path.isfile(localFile):
                oldsize = os.path.getsize(localFile)
                if oldsize < content_size:
                    os.unlink(localFile)   # partial file: restart the download
                else:
                    return                 # already complete (or size unknown)
            dirname = os.path.dirname(localFile)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            filename = os.path.basename(localFile)

            data_count = 0
            with open(localFile, "wb") as file:
                for data in response.iter_content(chunk_size=chunk_size):
                    file.write(data)
                    data_count = data_count + len(data)
                    # guard the progress math against an unknown (0) total size
                    now_jd = (data_count / content_size) * 100 if content_size else 0
                    print("\r文件下载进度：%d%%(%.2f/%.2f MB) - %s" % (now_jd, data_count/1024/1024, content_size/1024/1024, filename), end=" ")
            print('\r')

    def down_m3u8(self,id,filepath):
        """Download an 81.cn video identified by playlist *id*: resolve the
        m3u8 playlist URL, concatenate all .ts segments into datadir/filepath,
        then convert the result to mp4 in place via ffmpeg.

        NOTE(review): the ffmpeg binary path is hard-coded for one Windows
        machine (D:\\soft\\ffmpeg) — should be made configurable.
        """
        filepath = os.path.join(self.datadir,filepath)
        headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36","Host":"yspmvms.81.cn"}
        # resolver endpoint returns JSONP containing the real m3u8 URL
        newurl = 'http://yspmvms.81.cn/?id={}&callbackparam=jQuery111305642970661431972_1546766302660&ctype=sd&ttype=pc&_=1546766302661'.format(id)
        text = requests.get(newurl,headers=headers).text
        m3u8_url = re.findall(r'(http:.*m3u8)',text)[0]
        filename = os.path.basename(m3u8_url)
        headers = {'X-Requested-With':'ShockwaveFlash/27.9.9.999',"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
        text = requests.get(m3u8_url,headers=headers).text
        # the playlist lists segments named <playlist>_<n>.ts
        matches = re.findall(r'{}_\d+\.ts'.format(filename),text,re.M)
        total = len(matches)
        count = 0
        dir = os.path.dirname(filepath)
        if os.path.isdir(dir) == False:
            os.makedirs(dir)
        for match in matches:
            # segment URL = playlist URL with the trailing filename swapped for the segment suffix
            realurl = '{}{}'.format(m3u8_url,match.replace(filename,''))
            header = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"}
            response = requests.get(realurl, timeout=120, headers=header)
            # append each segment so the single file is the concatenated stream
            with open(filepath, mode='ab+') as f:
                f.write(response.content)
            count = count+1
            print("\r正在下载视频文件%s,共有%d个切片，已下载%d个" % (filename,total, count),
                  end=" ")
        newpath = filepath+'.mp4'
        print('\r开始转换m3u8文件')
        # remux the concatenated TS to mp4 (copy codecs, fix AAC ADTS headers)
        cmd_str = 'D:\\soft\\ffmpeg\\bin\\ffmpeg.exe -v quiet -i \"' + filepath + '\" ' + '-acodec copy -vcodec copy -absf aac_adtstoasc \"' + newpath + '\" '
        subprocess.call(cmd_str, shell=True)
        # replace the raw TS file with the converted mp4 under the original name
        os.unlink(filepath)
        os.rename(newpath, filepath)
        print('转换完成')

    def content(self,urls):
        """Mirror one article ('content_*') page.

        Fetches the page, strips trackers and site chrome, queues pagination
        links, inline mp4/mp3 media and page resources into the Urls table,
        and writes the rewritten HTML to urls.savepath.

        Args:
            urls: a Urls row; uses .url, .deepth and .savepath.
        """
        url = urls.url
        deepth = urls.deepth
        savepath = urls.savepath

        urlparse = parse.urlparse(url)
        dirpre = self.getdirpre(urlparse.path)  # NOTE(review): computed but never used

        html = self.gethtml(url)
        if html == '':
            return
        # drop tracker / share / stats scripts injected by the live site
        html = re.sub(r'<script.*?http://cl3.*</script>',r'',html)
        html = re.sub(r'<script.*?wxs-1\.0\.0\.js.*</script>',r'',html)
        html = re.sub(r'wd_paramtracker.*\)',r'',html)
        html = re.sub(r'var\s*bp[\s\S]*s\);',r'',html)
        # collapse whitespace immediately before quotes so later literal
        # string replaces can match
        html = re.sub(r'\s"',r'"',html)
        html = re.sub(r"\s'",r"'",html)

        doc = pq(html)
        # remove navigation, share widgets, footer and other site chrome
        doc('.header').remove()
        doc('a.app').remove()
        doc('#share-to').remove()
        doc('#side-fixed-button').remove()
        doc('.bdsharebuttonbox').remove()
        doc('.nav-bar').remove()
        doc('#pageFooter').remove()
        doc('.footer-content').remove()
        doc('div.media a').remove_attr('href')
        doc('.relation-list').remove()
        doc('.sidebar').html('')
        doc('.breadcrumb a').remove_attr('href')
        doc('#loadNextPageBtn').remove()
        doc('.container').css('margin-top','0')
        doc('#ArticleRelation').remove()

        # queue multi-page ("next page") links and point them at the mirror
        links = doc('#displaypagenum a').items()
        for link in links:
            href = link.attr.href
            check = self.checkurl(href)
            if check == False:
                continue
            fullurl, newsavepath = self.geturlandsavepath(url, href)
            if newsavepath == '' or self.checkurl(fullurl) == False:
                link.remove_attr('href')
            else:
                Urls.addurl(url=fullurl, fromurl=url, deepth=deepth + 1, filetype=1, savepath=newsavepath, ztname = self.ztname )
                if href.find('http')>-1:
                    link.attr.href = '/'+newsavepath
                else:
                    pass
        # disable every remaining anchor but keep the pagination block clickable
        pagehtml = doc('#displaypagenum').html()
        doc('a').remove_attr('href')
        doc('#displaypagenum').html(pagehtml)
        html = doc.outer_html()
        html = html.replace('&#13;', '')

        # queue inline '//Video http://...mp4' references and rewrite them to
        # local mirror paths
        pattern = re.compile(r'//Video\s+(http:.*?\.mp4)')
        match = pattern.findall(html)
        if match:
            for m in match:
                filename = os.path.basename(m)
                mp4path = os.path.dirname(savepath)+"/"+filename
                Urls.addurl(url=m, fromurl=url, deepth=deepth + 1, filetype=3, savepath=mp4path, ztname = self.ztname)
                html = html.replace(m,'/'+mp4path)

        # same for '//Audio http://...mp3' references
        pattern = re.compile(r'//Audio\s+(http:.*?\.mp3)')
        match = pattern.findall(html)
        if match:
            for m in match:
                filename = os.path.basename(m)
                mp4path = os.path.dirname(savepath)+"/"+filename
                Urls.addurl(url=m, fromurl=url, deepth=deepth + 1, filetype=3, savepath=mp4path, ztname = self.ztname)
                html = html.replace(m,'/'+mp4path)

        # queue css/img/script assets, then write the page to disk
        html = self.downres(url,html)
        self.savehtml(savepath,html)

    def tv(self,urls):
        """Mirror one tv.81.cn video page.

        Handles two player variants: a direct media URL (no #cmplayer
        data-media attribute; URL taken from #mediaurl) queued as a plain
        download, or an m3u8 playlist id embedded in data-media, queued for
        down_m3u8 (filetype=3 with a non-http "url").

        Args:
            urls: a Urls row; uses .url, .deepth and .savepath.
        """
        url = urls.url
        deepth = urls.deepth
        savepath = urls.savepath

        urlparse = parse.urlparse(url)
        dirpre = self.getdirpre(urlparse.path)  # NOTE(review): computed but never used

        html = self.gethtml(url)
        if html == '':
            return
        # strip tracker / stats / UA-sniffing scripts
        html = re.sub(r'<script.*?http://cl3.*</script>',r'',html)
        html = re.sub(r'wd_paramtracker.*\)',r'',html)
        html = re.sub(r'var\s*bp[\s\S]*?s\);',r'',html)
        html = re.sub(r'\s"',r'"',html)
        html = re.sub(r"\s'",r"'",html)
        html = re.sub(r'<script>var\s*ua[\s\S]*</script>','',html)

        doc = pq(html)
        # strip site chrome and restyle the player for the mirror
        doc('body').css('padding-top','0')
        doc('.player').css('width','1140px')
        doc('.player-list').remove()
        doc('.video-info-share').remove()
        doc('.bdsharebuttonbox').remove()
        doc('.header').remove()
        doc('.navbar').remove()
        doc('#pageFooter').remove()
        doc('.footer-content').remove()
        doc('head').append('<style>.video-box video{width:100%;height:500px;}</style>')
        doc('.breadcrumb a').remove_attr('href')
        doc('.v_aside a').remove_attr('href')

        videourl = doc('#cmplayer').attr('data-media')
        if videourl == None:
            # variant 1: plain media URL published in the #mediaurl element —
            # queue it as a direct download and point the page at the local copy
            videourl = doc("#mediaurl").text()
            filename = os.path.basename(videourl)
            mp4path = os.path.dirname(savepath) + "/" + filename
            Urls.addurl(videourl,fromurl=url,savepath=mp4path,deepth=deepth+1,filetype=3,filestatus=0, ztname = self.ztname)
            html = doc.outer_html()
            html = html.replace('&#13;', '')
            html = html.replace(videourl, '/'+mp4path)
            html = self.downres(url, html)
            pass
        else:
            # variant 2: data-media carries "...id=<playlist id>...<name>.mp4";
            # the id (not a URL) is queued so singlework routes it to down_m3u8
            videourl = videourl.strip()
            res = re.findall(r'id=([^&]+).*?([^/]*\.mp4)',videourl)
            id = res[0][0]
            mp4 = res[0][1]
            dir = os.path.dirname(savepath)
            mp4path = os.path.join(dir, mp4)
            currdir = os.getcwd()
            # NOTE(review): the backslash normalization below is Windows-only
            # path munging — it reduces mp4path back to a datadir-relative path
            # after deduplicating separators; verify on the target platform.
            mp4path = os.path.join(currdir,mp4path)
            mp4path = mp4path.replace('\\\\','\\')
            mp4path = mp4path.replace('\\','\\\\')
            if os.path.isfile(mp4path):
                os.unlink(mp4path)
            currdir = currdir.replace('\\','\\\\')
            mp4path = mp4path.replace(currdir+'\\\\','')
            Urls.addurl(id,fromurl=url,savepath=mp4path,deepth=deepth+1,filetype=3,filestatus=0, ztname = self.ztname)
            # replace the flash player with an inline '//Video' marker script
            # that the mirror's javascript turns into a <video> element
            doc('#cmplayer').remove()
            html = doc.outer_html()
            html = html.replace('&#13;', '')
            html = self.downres(url,html)
            doc = pq(html)
            replacestr = '<script title="sript"><!-- //Video {} --></script><script src="/xxqj/www/xue-xi/15746.files/javascript-2.5.js?v=3.5"></script>'.format('/'+mp4path)
            doc('.video-box').append(replacestr)
            html = doc.outer_html()
            html = html.replace('&#13;', '')

        self.savehtml(savepath,html)

    def node(self,urls):
        """Mirror one list ('node_*') page.

        Queues its pagination links and submenu links as list pages
        (filetype=5), its article links as content pages (filetype=1), and
        rewrites all of them to local mirror paths before saving.

        Args:
            urls: a Urls row; uses .url, .deepth and .savepath.
        """
        url = urls.url
        deepth = urls.deepth
        savepath = urls.savepath

        urlparse = parse.urlparse(url)
        dirpre = self.getdirpre(urlparse.path)  # NOTE(review): computed but never used

        html = self.gethtml(url)
        if html == '':
            return
        # strip share-widget / tracker scripts
        html = re.sub(r'<script\s*src="http://www\.81\.cn:80/res/wxs.*</script>',r'',html)
        html = re.sub(r'<script>\s*window\._bd_share_config[\s\S]*?</script>',r'',html)
        html = re.sub(r'document\.write\(unescape\("%3Cscript[\s\S]*\)\);',r'',html)
        html = re.sub(r'wd_paramtracker.*\)',r'',html)

        doc = pq(html)
        # remove site chrome and restyle the banner area
        doc('.bg-relative').css('height','552px').css('overflow','hidden')
        doc('a.nav-img').css('margin-top','20px').remove_attr('href')
        doc('.header').remove()
        doc('.nav').remove()
        doc('.nav-bar').remove()
        doc('.sidebar').html('')
        doc('#pageFooter').remove()
        doc('#side-fixed-button').remove()
        doc('.footer-content').remove()
        doc('.breadcrumb a').remove_attr('href')
        doc('.footer').remove()
        doc('#loadNextPageBtn').remove()

        list = doc('#main-news-list')
        if list:
            # pagination links -> queue as list pages (filetype=5)
            pages = doc('#displaypagenum a').items()
            for u in pages:
                href = u.attr('href')
                if href==None or len(href.strip())==0:
                    u.remove_attr('href')
                    continue
                else:
                    fullurl, newsavepath = self.geturlandsavepath(url, href)
                    if newsavepath == '':
                        u.remove_attr('href')
                    else:
                        Urls.addurl(url=fullurl,fromurl=url,deepth=deepth+1,filetype=5,savepath=newsavepath, ztname = self.ztname)
                        if href.find('http')>-1:
                            u.attr.href = '/'+newsavepath
            # article links -> queue as content pages (filetype=1); two special
            # nodes are exempt from the content_ filter
            contents = doc('#main-news-list li a').items()
            for u in contents:
                href = u.attr('href')
                # NOTE(review): href may be None here (anchors without href),
                # which would raise AttributeError on .find — confirm inputs.
                if href.find('content_')==-1 and url.find('node_102746')==-1 and url.find('jvjt2018.htm')==-1:
                    u.remove_attr('href')
                    continue
                    pass
                fullurl, newsavepath = self.geturlandsavepath(url, href)
                if newsavepath == '' or self.checkurl(fullurl) == False:
                    u.remove_attr('href')
                else:
                    Urls.addurl(url=fullurl,fromurl=url,deepth=deepth+1,filetype=1,savepath=newsavepath, ztname = self.ztname)
                    if href.find('http')>-1:
                        u.attr.href = '/'+newsavepath
        # submenu links -> queue as list pages (filetype=5)
        links = doc('.nav-submenu a')
        if links:
            for link in links.items():
                href = link.attr.href
                check = self.checkurl(href)
                if check == False:
                    link.remove_attr('href')
                    continue
                fullurl, newsavepath = self.geturlandsavepath(url, href)
                if newsavepath == '':
                    link.remove_attr('href')
                else:
                    Urls.addurl(url=fullurl,fromurl=url,deepth=deepth+1,filetype=5,savepath=newsavepath, ztname = self.ztname)
                    if href.find('http')>-1:
                        link.attr.href = '/'+newsavepath
                    else:
                        pass
        # every other link inside the container -> queue as content pages;
        # links already rewritten to the mirror root (/xxqj/) are left alone
        for link in doc('.container a').items():
            href = link.attr.href
            check = self.checkurl(href)
            if check == False:
                link.remove_attr('href')
                continue
            if href.startswith('/xxqj/'):
                continue
            fullurl, newsavepath = self.geturlandsavepath(url, href)
            if newsavepath == '' or self.checkurl(fullurl) == False:
                link.remove_attr('href')
            else:
                Urls.addurl(url=fullurl,fromurl=url,deepth=deepth+1,filetype=1,savepath=newsavepath, ztname = self.ztname)
                if href.find('http')>-1:
                    link.attr.href = '/'+newsavepath
                else:
                    pass

        html = doc.outer_html()
        html = html.replace('&#13;', '')

        # queue css/img/script assets, then write the page to disk
        html = self.downres(url,html)
        self.savehtml(savepath,html)

    def css(self,urls):
        """Download one CSS file and queue every url(...) asset it references
        (filetype=2) so images/fonts pulled in by the stylesheet get mirrored.

        Args:
            urls: a Urls row; uses .url, .deepth and .savepath.
        """
        url = urls.url
        deepth = urls.deepth
        savepath = urls.savepath

        # BUG FIX: the original joined datadir onto savepath *before* calling
        # downfile(), which joins datadir again — the css was written to
        # datadir/datadir/... while being read back from datadir/..., so the
        # url(...) scan always ran on an empty string and queued nothing.
        # (It also fetched the css a second time via gethtml() and discarded
        # the result; that dead request is removed.)
        self.downfile(url, savepath)
        localpath = os.path.join(self.datadir, savepath)
        content = ''
        try:
            with open(localpath, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception:
            # best-effort: an unreadable css simply queues no assets
            pass
        pattern = re.compile(r"url\((.*?)\)")
        for img in pattern.findall(content):
            img = img.strip('"')
            img = img.strip("'")
            fullurl, newsavepath = self.geturlandsavepath(url, img)
            if newsavepath == '':
                continue
            Urls.addurl(url=fullurl, fromurl=url, deepth=deepth + 1, filetype=2, savepath=newsavepath, ztname = self.ztname)

    def singlework(self,model):
        """Dispatch one claimed Urls row to its handler, then record the
        outcome (filestatus 1 when the output file exists, 10 otherwise)."""
        target = model.url
        if model.filetype == 5:
            self.node(model)
        elif model.filetype == 1:  # html page
            if target.find('node_') > -1:
                self.node(model)
            elif target.find('tv') > -1:
                self.tv(model)
            else:
                # content_ pages and everything else share the same handler
                self.content(model)
        elif model.filetype == 2:  # page resource
            if target.find('.css') > -1:
                self.css(model)
            else:
                self.downfile(target, model.savepath)
        elif model.filetype == 3:  # audio/video
            if target.find('http') > -1:
                self.downfile(target, model.savepath)
            else:
                # non-URL "url" is an m3u8 playlist id queued by tv()
                self.down_m3u8(target, model.savepath)

        # success is judged by the expected output file existing on disk
        saved = os.path.join(self.datadir, model.savepath)
        Urls.setstatus(model.id, 1 if os.path.isfile(saved) else 10)

    def parseindex(self,url,doc):
        """Clean the topic index page *doc*, queue every allowed link (node_
        pages as filetype=5, others as filetype=1 at depth 1) and rewrite
        absolute hrefs to local mirror paths. Returns the resulting html."""
        doc('.nav-bar').remove()
        doc('.footer').remove()

        for anchor in doc('a').items():
            href = anchor.attr.href
            if not self.checkurl(href):
                anchor.remove_attr('href')
                continue
            fullurl, newsavepath = self.geturlandsavepath(url, href)
            if newsavepath == '':
                anchor.remove()
                continue
            if not self.checkurl(fullurl):
                anchor.remove_attr('href')
                continue
            # node_ pages are list pages; everything else is a content page
            filetype = 5 if href.find('node_') > -1 else 1
            Urls.addurl(url=fullurl, fromurl=url, savepath=newsavepath, filetype=filetype, deepth=1, ztname = self.ztname)
            if href.find('http') > -1:
                anchor.attr.href = '/' + newsavepath
        return doc.outer_html().replace('&#13;', '')


if __name__ == '__main__':
    # bootstrap: create the zgjw_urls work-queue table
    # NOTE(review): presumably a no-op when the table already exists — confirm
    # the installed peewee version's create_table default (safe=True).
    Urls.create_table()