import urllib.request
import re
import time,datetime
import os

#------------打开网页--------------------
def open_url(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Returns an empty string when the server answers with an HTTP error,
    so callers can still run regexes / parsing over the result without
    crashing.
    """
    req = urllib.request.Request(url)
    # Bug fix: the header name was misspelled 'Uesr-Agent', so the spoofed
    # browser UA was never actually sent under the expected header.
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36')
    try:
        html = urllib.request.urlopen(req).read().decode('utf-8')
    except urllib.error.HTTPError:
        # Bug fix: the original did `pass` here, leaving `html` unbound and
        # raising UnboundLocalError at the return below.
        html = ''
    return html

#---------------寻找网页中文章的连接地址--------------------------    
def find_link(html):
    """Extract article URLs from the front-page HTML.

    Returns the matched hrefs de-duplicated while preserving first-seen
    order (the original used an O(n^2) `not in list` scan; dict.fromkeys
    gives the same ordered result in O(n)).
    """
    pat = r'<a strategy=".*" href="(.*?\d)" target="_blank">'
    alllink = re.compile(pat).findall(html)
    return list(dict.fromkeys(alllink))

#-----------------匹配文章的标题-------------------------------
def find_title(thislink):
    """Fetch the article at *thislink* and return its <h1> title with
    characters that are illegal in Windows filenames stripped out.

    Returns the fallback 'untitled' when the page has no matching title
    (the original left `thistitle` unbound in that case and raised
    UnboundLocalError at the return).
    """
    # Characters to strip before the title is used as a filename.
    # NOTE(review): the two-character entry '《》' can never equal a single
    # character of the title, so it never matches — kept for behavior parity.
    tag = ['\\', '/', ':', '*', '?', '《》', '<', '>', '|']
    thistitle = 'untitled'  # fallback so the function always returns a str
    try:
        thishtml = open_url(thislink)
        pat = '<h1 class="csdn_top">(.*?)</h1>'
        titlelist = re.compile(pat).findall(thishtml)
        thistitle = titlelist[0]
        for each in thistitle:
            if each in tag:
                thistitle = thistitle.replace(each, '')
    except IndexError:
        # No title found on the page; keep the fallback.
        pass
    return thistitle

#-------------网页存储本地------------------------------
def save_web(thistitle, thishtml):
    """Write the page HTML to '<thistitle>.html' in the current directory."""
    filename = thistitle + '.html'
    with open(filename, 'w', encoding='utf-8') as out:
        out.write(thishtml)

#-----------------爬虫主程序---------------------------------
def crawler(path):
    """Crawl the CSDN front page and save each linked article under *path*.

    Changes the process working directory to *path*, then downloads every
    article link found on the front page and stores it as '<title>.html'.
    """
    os.chdir(path)
    url = "https://blog.csdn.net/"
    front_page = open_url(url)
    for link in find_link(front_page):
        page = open_url(link)
        title = find_title(link)
        save_web(title, page)

#-----------------定时循环-------------------------------------------
def loop(h, m, times):
    """Run the crawler once whenever the clock reads h:m, *times* times total.

    Polls the wall clock once a minute; prints 'End!' and returns after the
    requested number of runs.
    NOTE(review): reads the module-level global `path` set at script startup —
    the function itself takes no path argument.
    """
    completed = 0
    while True:
        current = datetime.datetime.now()
        if completed == times:
            print('End!')
            break
        if current.hour == h and current.minute == m:
            crawler(path)
            completed += 1
            print('已完成%d次爬取，还剩余%d次爬取！' % (completed, times - completed))
        time.sleep(60)  # poll once per minute

# Script entry point: prompt for the output folder and schedule, then start
# the timed crawl. Guarded so that importing this module does not trigger
# interactive prompts (the original ran these at import time).
if __name__ == '__main__':
    # `path` intentionally stays a module-level global: loop() reads it.
    path = input("请输入网页存储文件夹(请将'\\'替换成'/')：")
    hour = int(input("请输入循环定时时钟："))
    mins = int(input("请输入循环定时分钟："))
    cru = int(input('请输入爬虫循环的次数：'))
    loop(hour, mins, cru)
