#!/usr/bin/env python
# encoding: utf-8
import sys
import codecs

# Force stdout to emit UTF-8 so the non-ASCII (Chinese) debug prints below
# don't raise UnicodeEncodeError on consoles with a narrower default encoding.
# Python 3 idiom: detach the underlying binary buffer and rewrap it with a
# UTF-8 StreamWriter. NOTE(review): after this, sys.stdout is a codecs
# StreamWriter, not a TextIOWrapper — confirm nothing relies on e.g.
# sys.stdout.encoding downstream.
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())

import scrapy
from scrapy.http import Request
from urllib import parse
from jobbloe1.items import JobbloeItem
from scrapy_redis.spiders import RedisSpider


class BlogSpider(RedisSpider):
    """Distributed spider for jobbole.com blog articles.

    Start URLs are fed through Redis (scrapy-redis) via the list named by
    ``redis_key``. ``parse`` walks the paginated listing pages, scheduling
    every article's detail page; ``detail_parse`` extracts the article meta
    line and yields a :class:`JobbloeItem`.
    """
    name = 'blog'
    allowed_domains = ['blog.jobbole.com', 'python.jobbole.com']
    # start_urls = ['http://python.jobbole.com/all-posts/']
    redis_key = 'blog:start_urls'

    def parse(self, response):
        """Parse one listing page: follow pagination, schedule detail pages."""
        # URL of the next listing page (the last anchor in the pagination bar).
        next_page = response.xpath('//div[@class="navigation margin-20"]/a[last()]/@href').get()
        if next_page:
            yield Request(url=parse.urljoin(response.url, next_page), callback=self.parse)

        # One selector per article entry on this listing page.
        for post in response.xpath('//div[@id="archive"]/div[@class="post floated-thumb"]'):
            detail_link = post.xpath('./div[@class="post-meta"]//a/@href').get()
            title = post.xpath('./div[@class="post-meta"]//a[@class="archive-title"]/text()').get()
            if not detail_link:
                # Without a link, urljoin(response.url, None) would just
                # re-yield the listing page itself -- skip the entry instead.
                continue
            # Pass the listing-page title along so detail_parse can emit it
            # together with the detail-page data.
            yield Request(url=parse.urljoin(response.url, detail_link),
                          callback=self.detail_parse,
                          meta={'title': title})

    def detail_parse(self, response):
        """Extract data from an article detail page and yield a JobbloeItem.

        ``response.meta['title']`` carries the title scraped from the
        listing page by :meth:`parse`.
        """
        self.logger.debug('---当前url地址---- %s', response.url)
        # The meta paragraph may be absent on some pages: .get() then returns
        # None, and calling .strip() on it would crash the callback.
        raw_info = response.xpath('//div[@class="entry-meta"]/p[@class="entry-meta-hide-on-mobile"]/text()').get()
        info = raw_info.strip() if raw_info else ''
        # Title forwarded from the listing page via Request.meta.
        title = response.meta.get('title', '')
        yield JobbloeItem(title=title, info=info)
