# coding:  utf-8
import DataUtils
from RequestUtils import RequestUtils
from datetime import datetime
import time
from threading import Timer
import json
from PageUtils import downloadPage


# Shared RequestUtils instance; lazily created by init_RequestUtils()
# once credentials (token/cookie) are available in the database.
requestUtils = None
def scheduleTask(task, time):
    '''
    Schedule *task* to run once after a delay using threading.Timer.

    task -- zero-argument callable to execute
    time -- delay before execution, in seconds
            (note: this parameter shadows the imported ``time`` module
            inside this function; kept for interface compatibility)
    '''
    timer = Timer(time, task)
    timer.start()

def init_RequestUtils():
    '''
    Build the module-level RequestUtils from credentials stored in the DB.

    Reads the resource row via DataUtils.getResource(). Positions 2 and 3
    are treated as token and cookie respectively (presumably matching the
    DataUtils schema — confirm against that module). Leaves the global
    ``requestUtils`` as None when no resource row is available, which
    callers interpret as "not ready yet, retry later".
    '''
    global requestUtils
    resource = DataUtils.getResource()
    if resource is None:  # `is None`, not `== None` (identity comparison)
        requestUtils = None
    else:
        # resource[2] = token, resource[3] = cookie (positional convention)
        token = resource[2]
        cookie = resource[3]
        requestUtils = RequestUtils(cookie=cookie, token=token)

def task_subscription():
    '''
    One scheduling step of the subscription-crawling pipeline.

    Flow:
      1. Lazily initialise the shared RequestUtils; if credentials are
         not available yet, retry the whole step in 10 minutes.
      2. Pull one subscription task from the database; if none is
         pending, retry in 10 minutes.
      3. Status 2 ("waiting"): fetch the account profile first, mark the
         row as status 1 ("collecting") and persist it.
         Status 1 ("collecting"): a previously interrupted task — resume
         directly.
      4. Hand off to task_pages() to collect the article list pages.

    Any request failure is handled best-effort: the step is simply
    rescheduled 10 minutes later.
    '''
    global requestUtils
    # Lazily initialise the shared request helper.
    if requestUtils is None:
        init_RequestUtils()
    if requestUtils is None:
        scheduleTask(task_subscription, 60*10)  # retry in 10 minutes
        return
    print("初始化完成")
    print("\n")
    # Fetch the next subscription task from the database.
    subscription = DataUtils.getTask()
    if subscription is None:
        scheduleTask(task_subscription, 60*10)  # retry in 10 minutes
        return
    print("获取到任务")
    print("\n")
    subscription = list(subscription)
    if subscription[5] == 2:  # waiting: fetch the account profile first
        try:
            sub_data = requestUtils.subscription_info(subscription[1])
            if sub_data:
                subscription[2] = sub_data["nickname"]
                subscription[5] = 1  # mark as "collecting"
                subscription[6] = json.dumps(sub_data)
                subscription[7] = ""  # reset crawl progress (spider_step)
                subscription[8] = sub_data["fakeid"]
                DataUtils.updateSubscription(subscription)
                print("更新任务状态为进行中")
                print("\n")
        except Exception:
            # Best-effort: on any request failure, retry the step later.
            scheduleTask(task_subscription, 60*10)
            return
    elif subscription[5] == 1:  # already collecting: resume crawling
        pass
    task_pages(subscription)
    
def task_pages(subscription):
    '''
    Crawl the article list pages of one subscription row.

    Progress is tracked in subscription[7] ("spider_step") as the string
    "cur-count" (current page index / total page count), so an
    interrupted crawl can be resumed later.  On success the row status
    (index 5) is reset to 0 and the next subscription task is scheduled
    in 1 minute; on failure the partial progress is persisted to the
    database and the whole step retries in 10 minutes.
    '''
    global requestUtils
    print("获取页面列表中")
    print("\n")
    cur = 0
    count = 1
    if subscription[7]:  # non-empty spider_step ("cur-count"): resume an interrupted crawl
        cur = int(subscription[7].split("-")[0])
        count = int(subscription[7].split("-")[1])
    else:
        try:
            # 5 articles per listing page.
            # NOTE(review): int(num/5) + 1 requests one extra (empty) page
            # when num is an exact multiple of 5 — confirm whether the API
            # tolerates an out-of-range `begin` before changing this.
            num = requestUtils.articles_nums(fakeid = subscription[8])
            count = int(num/5) + 1
            cur = 0
            subscription[5] = 1 # mark this task as in progress
        except Exception:
            scheduleTask(task_subscription, 60*10)
            return
    
    try:
        while cur < count:
            pages = requestUtils.get_articles_data(fakeid = subscription[8], begin = cur*5)['app_msg_list']
            for page in pages:
                # Page row: status 2 = pending download; last field = raw page data.
                data_page = (None, None, None, None, 2, None, None, page)
                DataUtils.insertPageInfo(data = data_page, sub = subscription)
            cur += 1
            # Persist-ready progress marker, saved to DB on failure/completion.
            subscription[7] = str(cur)+"-"+str(count)
        else:
            # while/else: runs when the loop exits normally — all pages fetched.
            subscription[5] = 0
            DataUtils.updateSubscription(subscription)
    except Exception:
        # Persist partial progress (spider_step) so the crawl can resume later.
        DataUtils.updateSubscription(subscription)
        scheduleTask(task_subscription, 60*10)
    else:
        scheduleTask(task_subscription, 60) # run the next task in 1 minute

def task_spider_page():
    '''
    Download the HTML for one pending page record, then re-arm itself.

    Pulls a single page task from the database, saves its content under
    pages/<unix_timestamp>.html, records success (status 0) or failure
    (status 3) on the row, and always reschedules this function to run
    again 5 seconds later.
    '''
    record = DataUtils.getTask_page()
    if record:
        record = list(record)
        url = record[5]
        stamp = int(time.time())
        target = "pages/" + str(stamp) + ".html"
        try:
            downloadPage(url, target)
        except Exception:
            record[4] = 3  # download failed
        else:
            # NOTE(review): this produces "/pages/pages/<ts>.html" — the
            # "pages" segment appears twice; confirm this matches how the
            # saved files are actually served before changing it.
            record[6] = "/pages/" + target
            record[4] = 0  # download succeeded
        DataUtils.updatePageInfo(record)
    # Re-arm: poll for the next page task in 5 seconds.
    scheduleTask(task_spider_page, 5)

if __name__ == "__main__":
    # Kick off both recurring loops: subscription/article crawling and
    # page downloading. Each reschedules itself via threading.Timer, so
    # the non-daemon timer threads keep the process alive indefinitely.
    task_subscription()
    task_spider_page()
    