import urllib.request as ur
import urllib.parse
import re
import urllib.error as ue
import os
import datetime as dt
import time

def crawl(number, base_dir='D:\\csdnedu\\'):
    """Download every article linked from the CSDN front page.

    Fetches http://blog.csdn.net, extracts article links with two regexes,
    and saves each linked page as ``<base_dir>/<number>/<i>.html``.

    Args:
        number: Run index; used as the name of the per-run output folder.
        base_dir: Root directory for downloads. Defaults to the original
            hard-coded Windows path for backward compatibility.
    """
    # CSDN front page
    url = 'http://blog.csdn.net'
    # Minimal User-Agent header so the server does not reject us as a bot
    header = ('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0')
    opener = ur.build_opener()
    opener.addheaders = [header]
    # Install the opener globally so urlretrieve() below also sends the header
    ur.install_opener(opener)
    data = ur.urlopen(url).read().decode('utf-8', 'ignore')
    # Raw strings for regex patterns; \n in pat2 still matches a newline
    pat1 = r'a strategy=".*?" href="(.*?)" target="_blank"'
    pat2 = r'a href="(.*?)" target="_blank">\n.*?class="carousel.*?caption"'
    # Precompile both patterns
    pattern1 = re.compile(pat1)
    pattern2 = re.compile(pat2)
    links = pattern1.findall(data)
    # Fix: actually use the compiled pattern2 (it was compiled but the
    # original re-ran re.findall(pat2, ...) instead), and extend in one call
    links.extend(pattern2.findall(data))
    # Fix: makedirs creates missing parents too; os.mkdir raised
    # FileNotFoundError when e.g. D:\csdnedu did not yet exist
    save_dir = os.path.join(base_dir, str(number))
    os.makedirs(save_dir, exist_ok=True)

    for i, link in enumerate(links):
        print('...正在下载第' + str(i) + '个网页...')
        try:
            ur.urlretrieve(link, os.path.join(save_dir, str(i) + '.html'))
            print('下载成功！')
        except ue.URLError as e:
            # Print whichever diagnostic attributes the error carries
            if hasattr(e, 'code'):
                print(e.code)
            if hasattr(e, 'reason'):
                print(e.reason)

if __name__ == '__main__':
    # Simple polling scheduler: run crawl() once shortly after startup,
    # then every two minutes thereafter.
    flag = 0
    now = dt.datetime.now()
    # First run fires 5 seconds after startup; dropping microseconds is
    # equivalent to the original rebuild-from-components truncation
    sched_timer = now.replace(microsecond=0) + dt.timedelta(seconds=5)
    # A new folder is created for each crawl run (indexed by i)
    i = 0
    while True:
        now = dt.datetime.now()
        if sched_timer < now < sched_timer + dt.timedelta(seconds=1):
            time.sleep(1)
            print(now)
            crawl(i)
            i += 1
            flag = 1
        else:
            if flag == 1:
                # Schedule the next crawl two minutes after the last one
                sched_timer = sched_timer + dt.timedelta(minutes=2)
                flag = 0
            # Fix: sleep briefly instead of busy-spinning at 100% CPU;
            # 0.2s is well under the 1-second trigger window, so no run
            # is ever missed
            time.sleep(0.2)


