# -*- coding: utf-8 -*-
import requests,re,time,MySQLdb,threading

# Browser-like User-Agent so cnblogs does not reject the requests.
header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0'}
db = MySQLdb.connect(host='localhost',user='root',passwd='root',db='test',charset='utf8')
cursor = db.cursor()
# Newest stored article timestamp; only strictly newer items get crawled.
cursor.execute("select dateline from news_cnblogs_news order by dateline DESC")
# fetchone() returns None on an empty table, which would crash the
# last_gettime[0] lookup in get_allurl() -- fall back to a zero-time tuple.
last_gettime = cursor.fetchone() or (0,)

def get_image(*img_url):
    for img in img_url:
        img_name = re.search(r'^.*/(.*)$',img)
        if img_name:
            img_name = img_name.group(1)
        else:
            return None
        img_t = requests.get(img)
        f = open('E:\python\python_test\pachong\images\\'+img_name,'wb')
        f.write(img_t.content)
        print u'图片下载完成'
        f.close()

def get_content(c_urls):
    num = 0
    for c_url in c_urls:
        #print c_url
        gcurl = 'http://news.cnblogs.com'+c_url
        c_html = requests.get(gcurl,headers=header)

        #print c_html.text.encode('utf-8')
        g_content = re.search(r'<div id="news_body">(.*?)</div><!--end: news_body -->',c_html.text,re.S).group(1) #博文内容
        g_content = g_content.strip()
        img_url = re.findall(r'src="(.*?)"',g_content,re.S)
        #get_image(img_url)
        if img_url:
            t = threading.Thread(target=get_image,args=img_url)#图片下载线程
            t.start()

        g_content = re.sub(r'http://images0.cnblogs.com/news/.*?/.*?/','/images/',g_content) #修改图片路径
        g_title = re.search(r'<div id="news_title"><a href=".*?">(.*?)</a>',c_html.text,re.S).group(1)#博文标题
        g_time = re.search(r'<span class="time">.*? (.*?)</span>',c_html.text,re.S).group(1)
        g_time = int(time.mktime(time.strptime(g_time,'%Y-%m-%d %H:%M'))) #博文时间

        num = num+1

        cursor.execute('insert into news_cnblogs_news (title,content,dateline) values(%s,%s,%s)',(g_title,g_content,g_time))

        print u'爬取内容：'+ str(num)
    return num

def get_allpage():
    """Return the URLs of the first 20 news list pages (获取所有分页)."""
    base = 'http://news.cnblogs.com/n/page/%d/'
    return [base % page for page in range(1, 21)]

def get_allurl():#获取所有新闻链接地址

    print u'开始爬取新闻'

    urls = get_allpage()
    #print urls
    #c_urls = []
    num = 0
    num1 = 0
    for url in urls:
        html = requests.get(url,headers = header)
        url_page = re.findall(r'<h2 class="news_entry">.*?<a href="(.*?)" target="_blank">',html.text,re.S)
        url_createtime = map(lambda x:int(time.mktime(time.strptime(x,'%Y-%m-%d %H:%M'))),re.findall(r'<span class="gray">(.*?)</span>',html.text,re.S))
        c_urls = zip(url_page,url_createtime)

        for c_url in c_urls:
            if c_url[1] <= int(last_gettime[0]):
                url_page.remove(c_url[0])

        if not url_page:
            break

        num1 +=1

        print u'爬取页数：'+str(num1)
        num = num + get_content(url_page)
    print u'共爬取的新闻条数：'+str(num)

# get_allurl() returns None, so the old `print get_allurl()` only emitted
# "None" after the crawl output -- just call it.
get_allurl()
# Commit before tearing anything down so the inserted rows are persisted.
db.commit()
cursor.close()
db.close()