# -*- coding: utf-8 -*-
import time
from urllib import parse

import scrapy
from scrapy import Request
from boles.items import BolesItem


class BlogSpider(scrapy.Spider):
    """Crawl blog.jobbole.com's all-posts listing, following pagination and
    yielding one BolesItem per article."""
    name = 'blog'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com/all-posts/']
    # NOTE(review): appears unused within this class — kept in case external
    # code reads BlogSpider.url; confirm and remove if dead.
    url = ''

    def parse(self, response):
        """Parse a listing page: schedule every article for detail parsing,
        then follow the "next page" link when present."""
        xpath = '//*[@id="archive"]'
        next_xpath = xpath + '//a[@class="next page-numbers"]//@href'
        title_xpath = xpath + '//a[@class="archive-title"]'
        # First, request each article found on the current listing page.
        for url in response.xpath(title_xpath + '//@href').extract():
            yield Request(url=response.urljoin(url),
                          callback=self.parse_detail)
        # If a next-page link exists, follow it. Evaluate the selector once
        # instead of twice (previously checked and then re-extracted).
        next_url = response.xpath(next_xpath).extract_first()
        if next_url:
            yield Request(url=response.urljoin(next_url), callback=self.parse)

    def parse_detail(self, response):
        """Extract title, publication date, body paragraphs and URL from an
        article page into a BolesItem.

        extract_first(default='') is used so a page with an unexpected layout
        yields empty fields instead of raising IndexError.
        """
        blog = BolesItem()
        blog['title'] = response.xpath(
            '//*[@class="entry-header"]/h1/text()').extract_first(default='')
        # The raw date string carries a trailing separator, e.g. "2017/01/01 ·".
        raw_date = response.xpath(
            'normalize-space(//*[@class="entry-meta-hide-on-mobile"]/text())'
        ).extract_first(default='')
        blog['date'] = raw_date.replace('·', '').strip()
        blog['entry'] = response.xpath('//*[@class="entry"]/p/text()').extract()
        blog['url'] = response.url
        yield blog
