'''
CSDN blog crawler.

Periodically downloads http://blog.csdn.net, extracts matching article
links, and saves each linked page into a freshly created timestamped
directory. The crawl interval is read from stdin at startup (minimum 60s).

v1.01 2018-02-06 first commit.
'''

import re
import os
import time
import urllib.request
import threading


#loop for crawler
def fun_timer():
    #set url headers
    url = "http://blog.csdn.net"
    headers=("User-Agent","Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36")
    opener = urllib.request.build_opener()
    opener.addheaders=[headers]

    #get data and decode
    data = opener.open(url).read().decode("utf-8")

    #set filter
    pat = '<a strategy="wechat" href="(.*?)"'
    result = re.compile(pat).findall(data)

    #delete repeat data
    lst = list(set(result))

    #get current file
    AbsFile=__file__
    #get current path
    FileName=AbsFile[:AbsFile.rfind("\\")]

    #create new documents
    DocNewPath = FileName + "_" + time.strftime("%Y%m%d%H%M%S", time.gmtime())
    os.makedirs(DocNewPath)

    #set file name
    FileName = DocNewPath + "\\" + time.strftime("%Y%m%d%H%M%S", time.gmtime())

    #loop for retrieve data
    for count in range(0,len(lst)):
        try:
            strFileName = FileName + "_" + str(count) + ".html"
            urllib.request.urlretrieve(lst[count],strFileName)
        except urllib.error.URLError as ex:
            print(str(ex))

    #recall the function
    global timer
    timer = threading.Timer(int(res[0]),fun_timer)
    timer.start()


#prompt until the user supplies a valid crawl interval (>= 60 seconds);
#the parsed result is kept in the global `res`, which fun_timer reads
while True:
    #set loop function time span; prompt text (Chinese): "enter the
    #crawl interval in seconds"
    TimerSpan = input("请输入爬虫间隔时间(单位秒):")
    res = re.findall(r'\d+', TimerSpan)
    #guard: input with no digits left `res` empty and the original
    #crashed with IndexError on res[0] — re-prompt instead
    if not res:
        print("最小的间隔时间是60s,请重新设置")
        continue
    if int(res[0]) < 60:
        print("最小的间隔时间是60s,请重新设置")
        continue
    else:
        break


#bootstrap: fire the first crawl 2 seconds from now; fun_timer re-arms
#the timer itself on every subsequent run using the user's interval
timer = threading.Timer(interval=2, function=fun_timer)
timer.start()
