# _*_ coding:utf-8 _*_
__author__ = 'jiangchao'
__date__ = '2017/4/21 0021 上午 9:41'
# Django must be configured before any ORM model import below succeeds.
import os,django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crawler_blog.settings")
django.setup()
import re, requests
import pickle
import sys
from bs4 import BeautifulSoup
from blog.models import Blog
from datetime import datetime
# Python 2 only: reload(sys) restores setdefaultencoding (hidden by site.py)
# so implicit str<->unicode coercions use UTF-8 instead of ASCII.
reload(sys)
sys.setdefaultencoding('utf8')
# Raised recursion limit; presumably for parsing deeply nested HTML with
# BeautifulSoup -- TODO confirm this is still needed.
sys.setrecursionlimit(7000)


def requests_method(url, timeout=10):
    """GET ``url`` with browser-like headers and return the Response.

    :param url: absolute URL on blog.csdn.net to fetch.
    :param timeout: seconds before the request is aborted; without it a
        dead connection would block the crawler forever.
    :return: the ``requests.Response`` object (not yet decoded).
    :raises requests.RequestException: on network failure or timeout.
    """
    header = {
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Host': 'blog.csdn.net',
        'Referer': 'http://www.csdn.net/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36',
    }
    r = requests.get(url, headers=header, timeout=timeout)
    return r


def get_page_total_num(soup):
    """Extract the total page count from the pagination widget.

    Looks at ``div.page_nav span`` elements (text like ``"700条 共20页"``)
    and returns the second numeric group of the last matching span as a
    string (callers wrap the result in ``int()``).

    Fix: the original initialised ``page = 1`` (an int) and then indexed
    ``page[1]``, so a page with no pagination widget raised TypeError.
    We now fall back to ``'1'`` (a single page) when nothing matches.
    """
    spans = soup.select('div.page_nav span')
    p_page_num = re.compile(r'\d+', re.S)
    nums = []
    for txt in spans:
        if txt:
            # Keep the digits of the last non-empty span, as before.
            nums = p_page_num.findall(str(txt))
    return nums[1] if len(nums) > 1 else '1'


def get_page_content(url):
    """Fetch one blog-post page and return its article HTML as a string.

    Tries the ``div #article_content`` container first, then the older
    ``div.skin_list`` layout; returns ``''`` when neither is present.

    Fix: replaced the bare ``except:`` (which silently swallowed *every*
    error, including KeyboardInterrupt) with an explicit emptiness check —
    the only failure mode it actually guarded was an empty select result.
    """
    r = requests_method(url)
    soup = BeautifulSoup(r.text, 'lxml')
    result = soup.select('div #article_content')
    if not result:
        result = soup.select('div.skin_list')
    return str(result[0]) if result else ''


def crawler_storage(soup):
    """Parse a CSDN list page and persist each previously unseen post.

    For every entry on the page: the title comes from ``div dl dd h3 a``,
    the summary from ``div dl dd div.blog_list_c``, and the full body is
    fetched from the post's own URL via ``get_page_content``. A post is
    saved only if no Blog row with the same title already exists.

    Fixes over the original:
    * the ``for soup in soup.select(...)`` loops clobbered the parameter,
      which forced a re-parse from the *global* ``r`` (crashing when the
      function was called without that global) — distinct loop variables
      remove the hidden global dependency entirely;
    * raw string for the ``\\g<1>`` replacement (invalid escape otherwise);
    * parenthesised ``print`` (valid in both Python 2 and 3).
    """
    if not soup:
        return
    title_list = []
    desc_list = []
    content_list = []
    # Strip the anchor markup, keeping only the href (the post URL).
    p_link = re.compile(r'<a href="(.*?)".*?">.*?</a>')
    for anchor in soup.select('div dl dd h3 a'):
        title_list.append(anchor.get_text())
        content_list.append(re.sub(p_link, r'\g<1>', str(anchor)))
    for summary in soup.select('div dl dd div.blog_list_c'):
        desc_list.append(summary.get_text())
    # Fetch each post body; one network round-trip per entry.
    content = [get_page_content(post_url) for post_url in content_list]
    for title, desc, body in zip(title_list, desc_list, content):
        # De-duplicate by title so re-crawls don't create duplicate rows.
        if not Blog.objects.filter(title=title):
            blog = Blog()
            blog.title = title
            blog.desc = desc
            blog.content = body
            blog.add_time = datetime.now()
            blog.save()
            print('save one page')
    return


if __name__ == '__main__':
    # Crawl the CSDN blog front page, then every paginated list page.
    base_url = 'http://blog.csdn.net/'
    r = requests_method(base_url)
    # Fix: Response.encoding expects a codec-name *string*; the original
    # assigned the tuple ('utf8',), which broke decoding of r.text.
    r.encoding = 'utf8'
    soup = BeautifulSoup(r.text, 'lxml')
    crawler_storage(soup)
    page_num = int(get_page_total_num(soup))
    for n in range(page_num):
        page_url = base_url + '?&page=%d' % n
        r = requests_method(page_url)
        r.encoding = 'utf8'
        soup = BeautifulSoup(r.text, 'lxml')
        crawler_storage(soup)



