#coding:utf-8

import datetime
import os
import platform
import re
import time
import urllib.error
import urllib.request

from bs4 import BeautifulSoup
#import sys
#reload(sys)
#sys.setdefaultencoding("utf-8")


def getPage(href):
    """Fetch *href* while presenting a browser-like User-Agent; return raw bytes.

    On an HTTP error the status code and reason are printed and the
    HTTPError is re-raised to the caller.

    Bug fix: the original caught the error, printed it, and then fell
    through to ``return post.read()`` where ``post`` was never bound,
    raising an unrelated NameError instead.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
    }
    req = urllib.request.Request(
        url=href,
        headers=headers
    )
    try:
        # Context manager guarantees the response socket is closed.
        with urllib.request.urlopen(req) as post:
            return post.read()
    except urllib.error.HTTPError as e:
        # HTTPError properly lives in urllib.error (urllib.request merely
        # re-exports it, which is why the original spelling "worked").
        print(e.code)
        print(e.reason)
        raise

url = 'http://blog.csdn.net/'

def getEvery(url):
    """Collect the category links from the CSDN front page at *url*.

    Returns a list of absolute category URLs (the bare site root itself is
    skipped).  Also ensures the local ``html`` output directory exists,
    because the download helpers below save pages into it.
    """
    page = BeautifulSoup(getPage(url), 'lxml')
    print(url)
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists/os.mkdir pair.
    os.makedirs('html', exist_ok=True)
    hrefList = []
    nav = page.find('div', class_='nav_com')
    if nav is not None:  # page layout may change; fail soft instead of AttributeError
        for li in nav.find_all('li'):
            anchor = li.a
            if anchor is None or anchor.get('href') is None:
                continue  # guard: an <li> without a usable link crashed the original
            href = 'http://blog.csdn.net' + anchor.get('href')
            if href != 'http://blog.csdn.net/':
                hrefList.append(href)
    return hrefList


def getAll(href):
    """Download every article linked from the listing page *href*.

    For each outbound (target="_blank") article link: print its title, save
    the page as ``html/<title>.html`` and follow it via getnext() to harvest
    related posts.  Per-link failures are logged and the crawl continues.
    """
    page = BeautifulSoup(getPage(href), 'lxml')
    for tracking in page.find_all(class_='csdn-tracking-statistics'):
        for link in tracking.find_all(target="_blank"):
            article_url = link.get('href')
            try:
                # One try block: in the original, a failed title fetch left
                # `ti` unbound and the second try-block then raised (and
                # silently swallowed) a NameError instead of reporting the
                # real download problem.
                title = BeautifulSoup(getPage(article_url), 'lxml').title
                title_text = title.text.strip()
                print('title=' + title_text)
                urllib.request.urlretrieve(article_url, "html/" + title_text + ".html")
                getnext(article_url)
            except Exception as e:
                # Best-effort crawl: log and move on to the next link.
                print(e)
        
def getnext(href):
    """If *href* is an article-detail URL, crawl its author's blog root.

    Strips everything from 'article/details' onward to obtain the blog
    root and hands it to getBlog().  Any failure is printed and ignored.
    """
    if 'article/details' not in href:
        return
    try:
        cut = href.find('article/details')
        blog_root = href[:cut]
        print(blog_root)
        print(href)
        getBlog(blog_root)
    except Exception as e:
        print(e)
def getBlog(href):
    """Download every post listed on the blog page *href* into ``html/``.

    Resolves relative links against the site root, prints each post's URL
    and title, and saves the page as ``html/<title>.html``.  Per-post
    failures are logged and skipped.
    """
    page = BeautifulSoup(getPage(href), 'lxml')
    for unit in page.find_all(class_='blog-unit'):
        for link in unit.find_all(target="_blank"):
            post_url = link.get('href')
            # Bug fix: the original prepended the site root to every href
            # that was not exactly 'http://blog.csdn.net/', which corrupted
            # links that were already absolute ('http://.../http://...').
            # Only relative paths need the prefix.
            if post_url and not post_url.startswith('http'):
                post_url = 'http://blog.csdn.net/' + post_url
            print('href=' + str(post_url))
            try:
                # Single try block: in the original a failed title fetch
                # left `ti` unbound, so the retrieve step raised a silently
                # swallowed NameError instead of doing anything useful.
                title_text = BeautifulSoup(getPage(post_url), 'lxml').title.text.strip()
                print('title=' + title_text)
                urllib.request.urlretrieve(post_url, "html/" + title_text + ".html")
            except Exception as e:
                print(e)
def run_task():
    """Crawl every category page discovered on the CSDN front page."""
    print('start')
    for category_url in getEvery(url):
        getAll(category_url)

def timer():
    """Run run_task() once at the scheduled time, then repeat every interval.

    Fixes over the original:
    - the loop only slept *after* a successful match, so it busy-spun at
      100% CPU while waiting; now it sleeps one second every lap;
    - timestamps were compared as second-resolution *strings* for exact
      equality, so a missed second skipped the run; a ``>=`` datetime
      comparison cannot miss;
    - ``sched_time`` silently changed type from datetime to str after the
      first run; it now stays a datetime throughout.
    """
    # First scheduled run; adjust as needed.
    sched_time = datetime.datetime(2017, 12, 27, 22, 38, 00)
    # Interval between runs (days, hours, minutes, seconds).
    interval = datetime.timedelta(days=1)
    now = datetime.datetime.now()
    # If the start time is already in the past, report it and bail out.
    if sched_time < now:
        print('开始时间已经错过，请重新调整开始时间')
        return
    while True:
        if datetime.datetime.now() >= sched_time:
            print('本次开始时间：' + str(sched_time))
            # Advance from the scheduled time (not "now") to avoid drift.
            sched_time = sched_time + interval
            print('下次开始时间：' + str(sched_time))
            print('请在这里开始你的程序')
            run_task()
        time.sleep(1)


if __name__=="__main__":
    timer()
