# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from blog.items import BlogItem
from redis import StrictRedis
from blog import conf


class BlogSpiderSpider(CrawlSpider):
	"""Incremental crawler for the cnblogs.com Python category.

	Follows category pagination pages, extracts blog-post detail links,
	and uses a Redis set (``conf.REDIS_URLS``) to remember already-scraped
	detail URLs so that repeated runs only fetch new posts.
	"""

	name = 'blog_spider'
	allowed_domains = ['cnblogs.com']
	start_urls = ['https://www.cnblogs.com/cate/python/1']

	# Name of the redis scheduling queue; leftover from a distributed
	# (scrapy-redis style) setup and currently unused.
	redis_key = 'blog_start'

	# Follow every category pagination page and hand it to parse_url.
	rules = (
		Rule(LinkExtractor(allow=r'.*?cate/python/\d+'), follow=True, callback='parse_url'),
	)

	# Shared Redis connection backing the seen-URL set.
	redis_obj = StrictRedis(host=conf.REDIS_HOST, port=conf.REDIS_PORT)

	def parse_url(self, response):
		"""Extract detail-page URLs from a listing page (incremental crawl).

		Only URLs not already present in the Redis seen-set are scheduled,
		so posts scraped on a previous run are skipped.
		"""
		detail_url_li = response.xpath(
			"//div[@class='post_item']//a[@class='titlelnk']/@href").getall()

		for detail_url in detail_url_li:
			# Not in redis -> never scraped; schedule the detail page.
			if not self.redis_obj.sismember(conf.REDIS_URLS, detail_url):
				yield scrapy.Request(url=detail_url, callback=self.parse_detail)

	def parse_detail(self, response):
		"""Parse one blog-post detail page and yield a BlogItem."""
		# Record the URL as seen so future runs skip it.
		# NOTE(review): marked seen before the item traverses the pipeline,
		# so a failing pipeline still counts the URL as done — confirm intended.
		self.redis_obj.sadd(conf.REDIS_URLS, response.url)

		content_tmp = response.xpath("//div[@id='cnblogs_post_body']").getall()
		# join accepts any iterable — no need to materialize list(filter(...)).
		content = ''.join(s for s in content_tmp if s and s.strip())
		title = response.xpath("//a[@id='cb_post_title_url']/text()").get()
		author = response.xpath("//a[@id='Header1_HeaderTitle']/text()").get()
		yield BlogItem(content=content, title=title, author=author)