#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'moxuandeng'

from scrapy.spiders import Spider
from scrapy.http import Request
from tutorial.items import QbStorie


class QbSpider(Spider):
    """Crawl qiushibaike.com "hot" pages: yield one QbStorie per article,
    then follow the next-page link until pagination ends."""

    name = "qiushibaike"
    # BUG FIX: allowed_domains must be a list/tuple. A bare string is iterated
    # character-by-character by Scrapy's OffsiteMiddleware, silently breaking
    # the domain filter.
    allowed_domains = ["qiushibaike.com"]
    start_urls = [
        'http://www.qiushibaike.com/hot/page/1'
    ]
    # Prefix for the relative "next page" hrefs found on each listing page.
    base_url = 'http://www.qiushibaike.com'

    def parse(self, response):
        """Parse one hot-list page.

        Yields a populated QbStorie item per article <div>, then a Request
        for the next page when a "next" link is present.
        """
        articles = response.xpath('//div[contains(@class,"article")]')
        for article in articles:
            author_sel = article.xpath('div[@class="author"]/a')
            author_name = author_sel.xpath('text()').extract()
            author_img = author_sel.xpath('img/@src').extract()

            content = article.xpath('div[@class="content"]/text()').extract()

            thumb = article.xpath('div[@class="thumb"]/a/img/@src').extract()

            video_sel = article.xpath('div[@class="video_hold"]/video')
            video_image = video_sel.xpath('@poster').extract()
            video = video_sel.xpath('source/@src').extract()

            stats_vote = article.xpath(
                'div[@class="stats"]/span[@class="stats-vote"]/i[@class="number"]/text()').extract()

            item = QbStorie()

            # Index 1: the author <a> appears to carry a leading whitespace
            # text node before the visible name — TODO confirm against the
            # live markup.
            # BUG FIX: previously an empty extract() list was replaced by the
            # string "NN" and then indexed, yielding the single char 'N';
            # content/stats_vote were not guarded at all and raised
            # IndexError. _first() handles both cases uniformly.
            item['author_name'] = self._first(author_name, 1).replace("\n", " ")
            item['author_img'] = self._first(author_img)
            item['content'] = self._first(content)
            # BUG FIX: store a scalar like every other field instead of the
            # raw list, keeping the item schema consistent.
            item['thumb'] = self._first(thumb)
            item['video_image'] = self._first(video_image)
            item['video'] = self._first(video)
            item['stats_vote'] = self._first(stats_vote)

            yield item

        # Follow pagination. extract() returns [] (it does not raise) when the
        # node is absent, so the former Python-2-only try/except was dead code.
        next_page = response.xpath(
            '//div[@class="pageto"]/a[@class="next"]/@href').extract()
        if next_page:
            yield Request(self.base_url + next_page[0], callback=self.parse)

    def parse_article(self, article):
        # Placeholder for per-article detail parsing; not yet implemented.
        pass

    def _first(self, values, index=0, default="NN"):
        """Return values[index], or *default* when the list is too short.

        Safe accessor for Selector.extract() results, which may be empty.
        """
        return values[index] if len(values) > index else default

    def default_data(self, data):
        """Return "NN" when *data* (an extract() result list) is empty.

        Kept unchanged for backward compatibility with external callers;
        new code inside this spider uses _first() instead, which returns a
        scalar and never yields a single 'N' character via indexing.
        """
        if len(data) == 0:
            data = "NN"
        return data
