# -*- coding:utf8 -*-
from zgjw_zt.utils import *
import re
from pyquery import PyQuery as pq
from zgjw_zt.db import *
import json


class spider():
    """Scraper for the 81.cn "xue-xi" (study) site section.

    Each public method fetches one kind of page with ``gethtml``, strips
    share/tracking scripts, rewrites internal links to local save paths,
    queues every newly discovered URL in the ``urls`` table via
    ``db.addurl`` and writes the cleaned HTML to disk with ``savehtml``.
    """

    def __init__(self, url):
        # Entry URL of the crawl; only index() reads it.
        self.url = url

    def index(self):
        """Fetch the start page, clean it and queue all links found on it."""
        fullurl, savepath = geturlandsavepath(self.url, '')

        html = gethtml(self.url)
        # Remove external/tracking scripts that would break offline viewing.
        html = re.sub(r'<script\s*src="http://www\.81\.cn:80/res/wxs.*</script>', r'', html)
        html = re.sub(r'<script>\s*window\._bd_share_config[\s\S]*?</script>', r'', html)
        html = re.sub(r'document\.write\(unescape\("%3Cscript[\s\S]*\)\);', r'', html)
        html = re.sub(r'wd_paramtracker.*\)', r'', html)
        html = re.sub(r'window\.location[\s\S]*?;', '', html)
        # Collapse whitespace before quotes so the patterns above keep
        # matching on re-crawls of already-saved pages.
        html = re.sub(r'\s*"', r'"', html)
        html = re.sub(r"\s*'", r"'", html)

        doc = pq(html)
        doc('.nav-bar').remove()
        doc('.footer').remove()
        doc('.rili').css('height', '591px').css('overflow', 'hidden')

        # Re-queue node (section index) pages and URLs whose status reached
        # 5 or more, so they are fetched again on this run.
        query = (urls.update(filestatus=0).where(urls.url.contains('node_')))
        query.execute()
        query = (urls.update(filestatus=0).where(urls.filestatus >= 5))
        query.execute()

        for link in doc('a').items():
            href = link.attr.href
            check = checkurl(href)
            if check is False:
                # Unusable target: keep the anchor text but make it inert.
                link.remove_attr('href')
                continue

            fullurl, newsavepath = geturlandsavepath(self.url, href)
            if newsavepath == '':
                link.remove()
            else:
                # Section index pages ("node_...") get filetype 5, others 1.
                filetype = 5 if href.find('node_') > -1 else 1
                db.addurl(url=fullurl, fromurl=self.url, savepath=newsavepath,
                          filetype=filetype, deepth=1)
                if href.find('http') > -1:
                    link.attr.href = '/' + newsavepath

        # Hard-wire the first menu entry to the local copy of its target.
        doc('.menu ul li a').eq(0).attr('href', '/xxqj/www/xue-xi/102726.htm')
        html = doc.outer_html()
        html = html.replace('&#13;', '')
        html = downres(self.url, html)
        savehtml(savepath, html)

    def _replace_media(self, html, url, deepth, savepath, pattern):
        """Queue every media URL matched by ``pattern`` inside ``html`` for
        download (filetype 3) and rewrite its reference to the local path.

        Returns the updated html string."""
        for media_url in pattern.findall(html):
            filename = os.path.basename(media_url)
            mediapath = os.path.dirname(savepath) + "/" + filename
            db.addurl(url=media_url, fromurl=url, deepth=deepth + 1, filetype=3, savepath=mediapath)
            html = html.replace(media_url, '/' + mediapath)
        return html

    def content(self, urls):
        """Fetch an article page, clean it, queue its pagination links and
        any embedded //Video or //Audio media, then save it locally.

        ``urls`` is one row of the urls table (the parameter shadows the
        module-level model of the same name; name kept for compatibility).
        """
        url = urls.url
        deepth = urls.deepth
        savepath = urls.savepath

        urlparse = parse.urlparse(url)
        # NOTE(review): dirpre is unused below; call kept in case getdirpre
        # has side effects — confirm and drop if it is pure.
        dirpre = getdirpre(urlparse.path)

        html = gethtml(url)
        if html == '':
            return
        html = re.sub(r'<script.*?http://cl3.*</script>', r'', html)
        html = re.sub(r'<script.*?wxs-1\.0\.0\.js.*</script>', r'', html)
        html = re.sub(r'wd_paramtracker.*\)', r'', html)
        html = re.sub(r'var\s*bp[\s\S]*s\);', r'', html)
        html = re.sub(r'\s"', r'"', html)
        html = re.sub(r"\s'", r"'", html)

        doc = pq(html)
        # Strip navigation, share widgets and related-article chrome.
        doc('.header').remove()
        doc('a.app').remove()
        doc('#share-to').remove()
        doc('#side-fixed-button').remove()
        doc('.bdsharebuttonbox').remove()
        doc('.nav-bar').remove()
        doc('#pageFooter').remove()
        doc('.footer-content').remove()
        doc('div.media a').remove_attr('href')
        doc('.relation-list').remove()
        doc('.sidebar').html('')
        doc('.breadcrumb a').remove_attr('href')
        doc('#loadNextPageBtn').remove()
        doc('.container').css('margin-top', '0')
        doc('#ArticleRelation').remove()

        # Queue multi-page article navigation links.
        for link in doc('#displaypagenum a').items():
            href = link.attr.href
            check = checkurl(href)
            if check is False:
                continue
            fullurl, newsavepath = geturlandsavepath(url, href)
            if newsavepath == '':
                link.remove_attr('href')
            else:
                db.addurl(url=fullurl, fromurl=url, deepth=deepth + 1, filetype=1, savepath=newsavepath)
                if href.find('http') > -1:
                    link.attr.href = '/' + newsavepath

        # Disable every remaining link, but keep the pagination block's
        # (already rewritten) hrefs by saving and restoring its markup.
        pagehtml = doc('#displaypagenum').html()
        doc('a').remove_attr('href')
        doc('#displaypagenum').html(pagehtml)

        html = doc.outer_html()
        html = html.replace('&#13;', '')

        # Localize embedded video and audio references.
        html = self._replace_media(html, url, deepth, savepath,
                                   re.compile(r'//Video\s+(http:.*?\.mp4)'))
        html = self._replace_media(html, url, deepth, savepath,
                                   re.compile(r'//Audio\s+(http:.*?\.mp3)'))

        html = downres(url, html)
        savehtml(savepath, html)

    def tv(self, urls):
        """Fetch a video page, queue its media file and save the page.

        Handles two layouts: a plain player whose media URL sits in
        ``#mediaurl``, and the ``#cmplayer`` player whose ``data-media``
        attribute carries an id plus an mp4 filename.
        """
        url = urls.url
        deepth = urls.deepth
        savepath = urls.savepath

        urlparse = parse.urlparse(url)
        # NOTE(review): dirpre is unused below; call kept in case getdirpre
        # has side effects — confirm and drop if it is pure.
        dirpre = getdirpre(urlparse.path)

        html = gethtml(url)
        if html == '':
            return
        html = re.sub(r'<script.*?http://cl3.*</script>', r'', html)
        html = re.sub(r'wd_paramtracker.*\)', r'', html)
        html = re.sub(r'var\s*bp[\s\S]*?s\);', r'', html)
        html = re.sub(r'\s"', r'"', html)
        html = re.sub(r"\s'", r"'", html)
        html = re.sub(r'<script>var\s*ua[\s\S]*</script>', '', html)

        doc = pq(html)
        doc('body').css('padding-top', '0')
        doc('.player').css('width', '1140px')
        doc('.player-list').remove()
        doc('.video-info-share').remove()
        doc('.bdsharebuttonbox').remove()
        doc('.header').remove()
        doc('.navbar').remove()
        doc('#pageFooter').remove()
        doc('.footer-content').remove()
        doc('head').append('<style>.video-box video{width:100%;height:500px;}</style>')
        doc('.breadcrumb a').remove_attr('href')
        doc('.v_aside a').remove_attr('href')

        videourl = doc('#cmplayer').attr('data-media')
        if videourl is None:
            # Plain layout: the media URL is the text of #mediaurl.
            videourl = doc("#mediaurl").text()
            filename = os.path.basename(videourl)
            mp4path = os.path.dirname(savepath) + "/" + filename
            db.addurl(videourl, fromurl=url, savepath=mp4path, deepth=deepth + 1, filetype=3, filestatus=0)
            html = doc.outer_html()
            html = html.replace('&#13;', '')
            html = html.replace(videourl, '/' + mp4path)
            html = downres(url, html)
        else:
            # Player layout: data-media holds "...id=<id>...<name>.mp4".
            videourl = videourl.strip()
            res = re.findall(r'id=([^&]+).*?([^/]*\.mp4)', videourl)
            video_id = res[0][0]
            mp4name = res[0][1]
            savedir = os.path.dirname(savepath)
            mp4path = os.path.join(savedir, mp4name)
            currdir = os.getcwd()
            mp4path = os.path.join(currdir, mp4path)
            # Normalize Windows backslashes to doubled form, then strip the
            # (equally doubled) current-directory prefix so the DB stores a
            # relative path. NOTE(review): Windows-specific munging — verify
            # behavior on POSIX paths.
            mp4path = mp4path.replace('\\\\', '\\')
            mp4path = mp4path.replace('\\', '\\\\')
            if os.path.isfile(mp4path):
                os.unlink(mp4path)
            currdir = currdir.replace('\\', '\\\\')
            mp4path = mp4path.replace(currdir + '\\\\', '')
            db.addurl(video_id, fromurl=url, savepath=mp4path, deepth=deepth + 1, filetype=3, filestatus=0)
            doc('#cmplayer').remove()
            html = doc.outer_html()
            html = html.replace('&#13;', '')
            html = downres(url, html)
            # Re-parse the processed html and inject a local JS player that
            # reads the mp4 path from the "//Video" marker comment.
            doc = pq(html)
            replacestr = '<script title="sript"><!-- //Video {} --></script><script src="/xxqj/www/xue-xi/15746.files/javascript-2.5.js?v=3.5"></script>'.format('/' + mp4path)
            doc('.video-box').append(replacestr)
            html = doc.outer_html()
            html = html.replace('&#13;', '')

        savehtml(savepath, html)

    def node(self, urls):
        """Fetch a section index ("node") page: queue its pagination links,
        article list, submenu links and remaining in-page links, then save
        the cleaned page locally."""
        url = urls.url
        deepth = urls.deepth
        savepath = urls.savepath

        urlparse = parse.urlparse(url)
        # NOTE(review): dirpre is unused below; call kept in case getdirpre
        # has side effects — confirm and drop if it is pure.
        dirpre = getdirpre(urlparse.path)

        html = gethtml(url)
        if html == '':
            return
        html = re.sub(r'<script\s*src="http://www\.81\.cn:80/res/wxs.*</script>', r'', html)
        html = re.sub(r'<script>\s*window\._bd_share_config[\s\S]*?</script>', r'', html)
        html = re.sub(r'document\.write\(unescape\("%3Cscript[\s\S]*\)\);', r'', html)
        html = re.sub(r'wd_paramtracker.*\)', r'', html)

        doc = pq(html)
        doc('.bg-relative').css('height', '552px').css('overflow', 'hidden')
        doc('a.nav-img').css('margin-top', '20px').remove_attr('href')
        doc('.header').remove()
        doc('.nav-bar').remove()
        doc('.sidebar').html('')
        doc('#pageFooter').remove()
        doc('#side-fixed-button').remove()
        doc('.footer-content').remove()
        doc('.breadcrumb a').remove_attr('href')
        doc('.footer').remove()
        doc('#loadNextPageBtn').remove()

        newslist = doc('#main-news-list')
        if newslist:
            # Pagination links are further node pages (filetype 5).
            for u in doc('#displaypagenum a').items():
                href = u.attr('href')
                if href is None or len(href.strip()) == 0:
                    u.remove_attr('href')
                    continue
                fullurl, newsavepath = geturlandsavepath(url, href)
                if newsavepath == '':
                    u.remove_attr('href')
                else:
                    db.addurl(url=fullurl, fromurl=url, deepth=deepth + 1, filetype=5, savepath=newsavepath)
                    if href.find('http') > -1:
                        u.attr.href = '/' + newsavepath
            # Article list entries become content pages (filetype 1).
            for u in doc('#main-news-list li a').items():
                href = u.attr('href')
                # Fix: anchors without an href used to crash on href.find().
                if href is None:
                    u.remove_attr('href')
                    continue
                if href.find('content_') == -1 and url.find('node_102746') == -1 and url.find('jvjt2018.htm') == -1:
                    u.remove_attr('href')
                    continue
                fullurl, newsavepath = geturlandsavepath(url, href)
                if newsavepath == '':
                    u.remove_attr('href')
                else:
                    db.addurl(url=fullurl, fromurl=url, deepth=deepth + 1, filetype=1, savepath=newsavepath)
                    if href.find('http') > -1:
                        u.attr.href = '/' + newsavepath

        # Sub-menu links are further node pages (filetype 5).
        for link in doc('.nav-submenu a').items():
            href = link.attr.href
            check = checkurl(href)
            if check is False:
                link.remove_attr('href')
                continue
            fullurl, newsavepath = geturlandsavepath(url, href)
            if newsavepath == '':
                link.remove_attr('href')
            else:
                db.addurl(url=fullurl, fromurl=url, deepth=deepth + 1, filetype=5, savepath=newsavepath)
                if href.find('http') > -1:
                    link.attr.href = '/' + newsavepath

        # Remaining in-page links count as content pages; links already
        # rewritten to the local /xxqj/ prefix are left untouched.
        for link in doc('.container a').items():
            href = link.attr.href
            check = checkurl(href)
            if check is False:
                link.remove_attr('href')
                continue
            if href.startswith('/xxqj/'):
                continue
            fullurl, newsavepath = geturlandsavepath(url, href)
            if newsavepath == '':
                link.remove_attr('href')
            else:
                db.addurl(url=fullurl, fromurl=url, deepth=deepth + 1, filetype=1, savepath=newsavepath)
                if href.find('http') > -1:
                    link.attr.href = '/' + newsavepath

        html = doc.outer_html()
        html = html.replace('&#13;', '')
        html = downres(url, html)
        savehtml(savepath, html)

    def css(self, urls):
        """Download a stylesheet and queue every ``url(...)`` resource it
        references (images, fonts) as filetype 2."""
        url = urls.url
        deepth = urls.deepth
        savepath = urls.savepath

        # Fix: the file was fetched twice (an unused gethtml() call before
        # downfile()); downfile alone retrieves and stores the stylesheet.
        downfile(url, savepath)
        content = ''
        try:
            with open(savepath, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception:
            # Best effort: leave content empty when the file is missing or
            # not valid UTF-8 — no resources get queued in that case.
            pass
        pattern = re.compile(r"url\((.*?)\)")
        for img in pattern.findall(content):
            img = img.strip('"').strip("'")
            fullurl, newsavepath = geturlandsavepath(url, img)
            if newsavepath == '':
                continue
            db.addurl(url=fullurl, fromurl=url, deepth=deepth + 1, filetype=2, savepath=newsavepath)

    def parse_rili(self):
        """Fetch the 81.cn calendar JSON feed, queue every linked article
        and media file, rewrite their links to local paths, save the JSON."""
        url = 'http://www.81.cn/rss/calendar.json'
        content = gethtml(url)
        jsonobj = json.loads(content)
        for date in jsonobj:
            for item in jsonobj[date]:
                link = item['news'][0]['link']
                fullurl, savepath = geturlandsavepath(link, link)
                item['news'][0]['link'] = '/' + savepath
                db.addurl(fullurl, fromurl=fullurl, savepath=savepath, deepth=1, filetype=1, filestatus=0)

                media = item['news'][0]['media'].strip()
                if media != '':
                    fullurl, savepath = geturlandsavepath(media, media)
                    db.addurl(fullurl, fromurl=fullurl, savepath=savepath, deepth=1, filetype=1, filestatus=0)
                    item['news'][0]['media'] = '/' + savepath

        # json.dumps emits ASCII by default (ensure_ascii=True), so writing
        # with the site's gb2312 encoding cannot raise UnicodeEncodeError.
        with open('data/www/xue-xi/calendar.json', 'w', encoding='gb2312') as f:
            f.write(json.dumps(jsonobj))