import os, time, json, re, traceback, shutil
from peaktask.utils import *
from peaktask.modules.junbaozt import ZTSpider,Urls
from pyquery import PyQuery as pq


## Test scripts can be written here; once finished, move the file into the
## tasks directory and it will be loaded automatically.
config = {
    'interval':120,                            # scheduling interval (seconds) used by the task runner
    'title':'2019年不忘初心牢记使命',            # task display title (runtime string, kept verbatim)
    'remark':'2019年不忘初心牢记使命',           # task remark/description
    'name':os.path.basename(__file__)[0:-3]    # task name = this filename without the ".py" suffix
}

#@wraplog(config['name'])
def run():
    """Mirror the 2019 special-topic pages from www.81.cn.

    For each entry page: fetch the HTML, strip tracking/share/navigation
    scripts and markers, remove nav/footer elements, download linked
    resources, and save the cleaned page. Then drain the queued URLs via
    ``Urls.popurl()`` and finally archive everything with ``zipdir()``.

    No parameters; no return value. Side effects: writes files under
    spider.datadir and mutates the ``Urls`` table.
    """
    Urls.create_table()
    spider = ZTSpider()
    spider.ztname = '2019zt'
    spider.datadir = '../data/'
    # Entry pages of the special topics to mirror.
    spider.ztlist = ['http://www.81.cn/2019zt/1057667.htm','http://www.81.cn/2019zt/105485.htm','http://www.81.cn/2019zt/105004.htm','http://www.81.cn/2019zt/105608.htm','http://www.81.cn/2018zt/index.htm','http://www.81.cn/2019sdxrs/node_105647.htm','http://www.81.cn/2019zt/105491.htm']

    # Re-queue navigation ("node_") pages and anything that has failed 5+ times.
    query = (Urls.update(filestatus=0).where(Urls.url.contains('node_')))
    query.execute()
    query = (Urls.update(filestatus=0).where(Urls.filestatus >= 5))
    query.execute()

    for url in spider.ztlist:
        fullurl, savepath = spider.geturlandsavepath(url, '')
        html = spider.gethtml(url)
        # Strip share widgets, tracking scripts, redirects and the marked
        # navigation/share comment regions before parsing.
        html = re.sub(r'<script\s*src="http://www\.81\.cn:80/res/wxs.*</script>', r'', html)
        html = re.sub(r'<script>\s*window\._bd_share_config[\s\S]*?</script>', r'', html)
        html = re.sub(r'document\.write\(unescape\("%3Cscript[\s\S]*\)\);', r'', html)
        html = re.sub(r'<!--分享[\s\S]*?分享-->', r'', html)
        html = re.sub(r'<!--导航[\s\S]*?导航-->', r'', html)
        html = re.sub(r'wd_paramtracker.*\)', r'', html)
        html = re.sub(r'window\.location[\s\S]*?;', '', html)
        html = re.sub(r'window\._bd_share_config[\s\S]*?];}', '', html)
        html = re.sub(r'\s*"', r'"', html)
        html = re.sub(r"\s*'", r"'", html)

        doc = pq(html)
        # Remove navigation, footer and top-bar elements from the page DOM.
        for selector in ('.nav', '.nav-bar', '.footer', '.top_bar', '.global-nav-bar'):
            doc(selector).remove()
        html = spider.parseindex(url, doc)
        html = spider.downres(url, html, True)
        spider.savehtml(savepath, html)

    # Drain the URL queue; give up after 100 consecutive empty/failing polls.
    # (fixed: original condition was `while True and trynum <= 100` — the
    # `True and` was redundant)
    trynum = 0
    while trynum <= 100:
        trynum = trynum + 1
        obj, left = Urls.popurl()
        while obj is not None:
            try:
                trynum = 0  # reset the give-up counter on every successful pop
                spider.singlework(obj)
                obj, left = Urls.popurl()
            except Exception:
                # Best-effort crawl: log the failure and restart the poll loop.
                traceback.print_exc()
                obj = None
    zipdir()

def zipdir():
    """Zip the collected ``../data/zgjw`` tree and move the archive to the
    network share, then delete the source tree.

    Best-effort: any failure is printed via traceback and swallowed, so the
    caller's loop keeps running. Always returns None.
    """
    try:
        # 105491.htm shares content with 105485.htm, so duplicate the file
        # when the source exists.
        if os.path.isfile('../data/zgjw/www/2019zt/105485.htm'):
            shutil.copy('../data/zgjw/www/2019zt/105485.htm', '../data/zgjw/www/2019zt/105491.htm')
        basedir = r'\\192.168.10.60\d$\chuanshu\数据采集[张俊峰]\zgjw'
        # BUG FIX: original called datetime.datetime.now(), but `datetime`
        # is never imported in this file (only possibly via the wildcard
        # import). time.strftime produces the identical 'zgjwMMDDHHMM'
        # stamp using the already-imported time module.
        filename = 'zgjw' + time.strftime('%m%d%H%M')
        filename = os.path.join('../data/', filename)
        shutil.make_archive(filename, 'zip', root_dir='../data/zgjw')
        filename = filename + '.zip'
        destpath = os.path.join(basedir, os.path.basename(filename))
        shutil.move(filename, destpath)
        # ignore_errors=True: a leftover tree is acceptable, never fatal.
        shutil.rmtree('../data/zgjw', True)
        print('本次处理结束')
    except Exception:
        traceback.print_exc()


if __name__ == '__main__':
    # Single crawl run; the commented-out loop below repeats the crawl every
    # half hour instead.
    run()
    #while True:
    #    run()
    #    print('{}: collecting again in half an hour'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))
    #    time.sleep(60 * 30)
