#!/usr/bin/env python
# -*- coding: utf-8 -*-
#encoding=utf-8

from bs4 import BeautifulSoup
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.http import Request
from funsion.items import *

# @author Funsion Wu
class FunsionSpider(CrawlSpider):
    """Crawl www.cnblogs.com/funsion: follow pagination links and scrape
    each article detail page into a FunsionItem.

    NOTE(review): overriding ``parse`` on a CrawlSpider disables the Rule
    machinery (the Scrapy docs warn against exactly this), so ``rules``
    below is effectively dead code — pagination and article discovery are
    done manually inside ``parse``. The override is kept for backward
    compatibility; if the Rule-based crawl is wanted instead, rename
    ``parse`` and let the rules drive.
    """

    name = "funsion"
    allowed_domains = ['www.cnblogs.com']
    start_urls = ['http://www.cnblogs.com/funsion']
    # Raw strings with escaped regex metacharacters. The originals were
    # broken as regexes: '.html?page=' made the 'l' optional and could never
    # match a literal '?page=', and 'p/*.html' meant "zero or more slashes,
    # then any single char, then html" rather than "any page under /p/".
    rules = [
        # Listing/pagination pages: follow them, no callback.
        Rule(LinkExtractor(allow=[r'.*/funsion/default\.html\?page=\d+']),
             follow=True),
        # Article detail pages: hand off to parse_article.
        Rule(LinkExtractor(allow=[r'.*/funsion/p/.*\.html']),
             callback='parse_article', follow=False),
    ]


    def parse(self, response):
        """Yield Requests for pagination pages and for article detail pages.

        :param response: listing-page response from Scrapy.
        """
        soup = BeautifulSoup(response.body, 'html.parser')

        # The pager block is absent on single-page blogs / the last page —
        # guard before dereferencing (original raised AttributeError on None).
        pager = soup.find(attrs={'class': 'topicListFooter'})
        if pager:
            for page_link in pager.find_all('a'):
                page_url = page_link.get('href')
                if page_url:
                    # urljoin tolerates relative hrefs; absolute URLs pass through.
                    yield Request(response.urljoin(page_url), callback=self.parse)

        # "Read more" anchors point at the article detail pages.
        for article_link in soup.find_all('a', attrs={'class': 'c_b_p_desc_readmore'}):
            article_url = article_link.get('href')
            if article_url:
                yield Request(response.urljoin(article_url),
                              callback=self.parse_article)


    def parse_article(self, response):
        """Build and return a FunsionItem from an article detail page.

        :param response: detail-page response from Scrapy.
        :returns: FunsionItem with source_url, title, article_content and
                  (when images are present) article_image populated.
        """
        fc_item = FunsionItem()
        soup = BeautifulSoup(response.body, 'html.parser')
        container = soup.find(id='cnblogs_post_body')
        fc_item['source_url'] = response.url
        # <title> can be missing on error/redirect pages — don't crash.
        fc_item['title'] = soup.title.get_text() if soup.title else ''
        # Post body is absent for deleted or permission-restricted posts;
        # original crashed with AttributeError on container.prettify().
        if container:
            fc_item['article_content'] = container.prettify()
        article_image = self.__get_original_image(container)
        if article_image:
            fc_item['article_image'] = article_image
        return fc_item


    def __get_original_image(self, container):
        """Collect the src of every <img> inside *container*.

        :param container: BeautifulSoup tag for the post body, or None.
        :returns: list of {'original': src} dicts, [] when images exist but
                  none carry a src, or None when the container is missing
                  or holds no <img> tags at all (matches original contract).
        """
        if not container:
            return None

        content_images = container.find_all('img')
        if not content_images:
            return None

        # Only images that actually declare a src are recorded.
        return [{'original': image['src']}
                for image in content_images if image.get('src')]
