# -*- coding: utf-8 -*-
# Sina blog crawler (新浪博客爬虫)
from bs4 import BeautifulSoup;
from urllib.request import *;
import sys;
import hashlib;

"""函数开始"""


def get_article_list_url(url):
    """Return the href of the '博文目录' (article index) link on the blog home page.

    Scans every anchor on the page; if several match, the last one wins
    (same as the original).  Returns '' when no matching link is found.
    """
    found_url = ''
    soup = BeautifulSoup(urlopen(url), 'lxml')
    for anchor in soup.find_all('a'):
        if anchor.text == '博文目录':
            found_url = anchor.attrs['href']
    return found_url


def get_article_page_url_list(url):
    """Return a sorted list of pagination-page URLs for an article-list page.

    The list always contains *url* itself plus every distinct href found in
    the page's 'SG_page' pagination div.

    Bug fix: the original called ``urlopen(blog_article_list_url)`` — a
    module-level global — instead of the ``url`` parameter, so the function
    only worked when that global happened to hold the same value.
    """
    article_page_url_list = [url]
    html = urlopen(url)  # was: urlopen(blog_article_list_url)
    bsObj = BeautifulSoup(html, 'lxml')
    # The 'SG_page' div holds the "page 2, 3, ..." navigation links.
    result = bsObj.find('div', {'class': 'SG_page'}).find_all('a')

    for link in result:
        href = link.attrs['href']
        if href and href not in article_page_url_list:
            article_page_url_list.append(href)

    return sorted(article_page_url_list)


def get_article_link_list(page_list):
    """Collect unique article-detail URLs from every pagination page.

    Only hrefs containing 's/blog_' (Sina article-detail pages) are kept;
    first-seen order is preserved.
    """
    url_list = []
    for page_url in page_list:
        html = urlopen(page_url)
        bsObj = BeautifulSoup(html, 'lxml')
        # The 'articleList' div contains one anchor per article entry.
        anchors = bsObj.find('div', {'class': 'articleList'}).find_all('a')
        for anchor in anchors:
            href = anchor.attrs['href']
            # Idiomatic membership test instead of .find(...) != -1.
            if 's/blog_' in href and href not in url_list:
                url_list.append(href)
    return url_list


def get_blog(url):
    """Fetch one article page and extract its fields into a dict.

    Returned keys: title, publsh_time (sic — the misspelled key is kept so
    previously cached JSON stays readable), author, source, content,
    target_url.
    """
    html = urlopen(url)
    bsObj = BeautifulSoup(html, 'lxml')
    # Hoist the shared container lookups instead of re-finding them per field.
    body = bsObj.find('div', {'id': 'articlebody'})
    title_div = body.find('div', {'class': 'articalTitle'})
    return {
        'title': title_div.find('h2').text,
        # Timestamp is rendered like "(2016-01-02 03:04:05)" — strip the parens.
        'publsh_time': title_div.find('span', {'class': 'time SG_txtc'}).text.strip().strip('()'),
        'author': bsObj.find('strong', {'id': 'ownernick'}).text.strip(),
        'source': '新浪博客',
        'content': body.find('div', {'class': 'articalContent'}).get_text(),
        'target_url': url,
    }


def print_g(info):
    """Print *info* and terminate the process (debug helper)."""
    import sys
    print(info)
    sys.exit()


def sha1(text):
    """Return the hex SHA-1 digest of *text* (UTF-8 encoded).

    Fixes two shadowing issues in the original: the parameter was named
    ``str`` (shadowing the builtin) and the local was named ``sha1``
    (shadowing this function).  All call sites in this file pass the
    argument positionally, so the rename is safe here.
    """
    import hashlib
    return hashlib.sha1(text.encode('utf-8')).hexdigest()


def get_file_cache(key):
    """Load cached JSON for *key* from ./cache/<key>.json.

    Returns the parsed object, or None when the file is missing,
    unreadable, or empty.
    """
    import json
    data = None
    try:
        # 'with' closes the file; the original also called close() redundantly
        # inside the with-block.
        with open('./cache/' + key + '.json', 'r', encoding='utf-8') as handle:
            result = handle.read().strip()
        if result:
            data = json.loads(result)
    except IOError:
        data = None
    return data


def set_file_cache(key, data={}):
    """Serialize *data* as JSON and write it to ./cache/<key>.json.

    Returns True on success, False when the payload is empty or the write
    fails.  The ``data={}`` default is kept for interface compatibility;
    it is never mutated.
    """
    import json
    import os
    payload = json.dumps(data, ensure_ascii=False)
    if not payload.strip():
        return False
    try:
        # Create the cache directory on first use instead of failing with IOError.
        os.makedirs('./cache', exist_ok=True)
        # Bug fix: mode was 'a' (append), so re-caching the same key produced
        # concatenated JSON documents that json.loads could never parse again.
        with open('./cache/' + key + '.json', 'w', encoding='utf-8') as handle:
            handle.write(payload)
        return True
    except IOError:
        return False


"""函数结束"""

# --- Script entry: crawl one Sina blog and cache every article as JSON ---

# Blog home page to crawl.  Earlier candidate targets, kept for reference:
#   http://blog.sina.com.cn/yuxiuhua1976
#   http://blog.sina.com.cn/twocold
blog_index_url = 'http://blog.sina.com.cn/1360qxh'

# The article-URL list is cached under the SHA-1 of the index URL.
key = sha1(blog_index_url)
article_url_list = get_file_cache(key)
if article_url_list is None:
    # Locate the article-index page from the blog home page.
    blog_article_list_url = get_article_list_url(blog_index_url)
    if blog_article_list_url == '' or blog_article_list_url is None:
        print('博文地址不存在')
        sys.exit()
    # Expand pagination, then collect every article-detail URL.
    page_list = get_article_page_url_list(blog_article_list_url)
    article_url_list = get_article_link_list(page_list)
    set_file_cache(key, article_url_list)

# Fetch each article's details, skipping ones already cached on disk.
for i, url in enumerate(article_url_list, start=1):
    key = sha1(url)
    if get_file_cache(key) is None:
        print(i)
        print('开始采集第' + url)
        result = get_blog(url)
        set_file_cache(key, result)
        print('采集成功')

print('采集完成')
sys.exit()
