# -*- coding:UTF-8 -*-
# coding:utf-8
import scrapy
from scrapy_test.items import ScrapyTestItem
import sys
reload(sys)
sys.setdefaultencoding('utf-8')



class CSDNSpider(scrapy.Spider):
	"""Spider for a CSDN blog article page.

	Yields one item (title + link) for the start article, then follows
	every link in the "hot articles" sidebar; each followed page is
	handled by ``get_title``.
	"""
	name = "csdn"
	# Throttle crawling: 1 second between requests.
	download_delay = 1
	allowed_domains = ["blog.csdn.net"]
	start_urls = ['http://blog.csdn.net/qq_30242609/article/details/53044109']

	def start_requests(self):
		# Explicit for clarity; mirrors scrapy.Spider's default behaviour
		# of requesting each start_url with self.parse as the callback.
		for url in self.start_urls:
			yield scrapy.Request(url=url, callback=self.parse)

	def get_title(self, response):
		"""Extract the article title and URL from a followed article page."""
		item = ScrapyTestItem()
		item['title'] = response.xpath(
			'//*[@id="article_details"]/div[1]/h1/span/a/text()').extract()
		item['link'] = response.url
		return item

	def parse(self, response):
		"""Yield the start article's item, then a request per hot article."""
		item = ScrapyTestItem()
		article_name = response.xpath(
			'//*[@id="article_details"]/div[1]/h1/span/a/text()').extract()
		item['title'] = [name.encode('utf-8') for name in article_name]
		# Bug fix: 'link' was left as the placeholder string 'hello world'
		# while the computed article URL went unused; store the real URL,
		# consistent with get_title.
		item['link'] = str(response.url)
		yield item

		# Follow each hot-article link; hrefs are site-relative paths.
		for href in response.css('#hotarticls .itemlist a::attr(href)').extract():
			url = 'http://' + self.allowed_domains[0] + href
			yield scrapy.Request(url=url, callback=self.get_title)







class MusicSpider(scrapy.Spider):
	"""Spider for a musicool.cn forum listing page.

	Currently only prints the href of every sticky ("stickthread_*")
	thread found on the start page; following those links is not yet
	implemented.
	"""
	name = "music"
	# Throttle crawling: 1 second between requests.
	download_delay = 1
	allowed_domains = ["bbs.musicool.cn"]
	start_urls = ['http://bbs.musicool.cn/forum-97-1.html']

	def start_requests(self):
		# Explicit for clarity; mirrors scrapy.Spider's default behaviour
		# of requesting each start_url with self.parse as the callback.
		for url in self.start_urls:
			yield scrapy.Request(url=url, callback=self.parse)

	def get_title(self, response):
		"""Extract a title and URL from a followed page.

		NOTE(review): this XPath targets CSDN's article layout (copied
		from CSDNSpider) and is unlikely to match pages on
		bbs.musicool.cn — confirm/replace the selector before use.
		"""
		item = ScrapyTestItem()
		item['title'] = response.xpath(
			'//*[@id="article_details"]/div[1]/h1/span/a/text()').extract()
		item['link'] = response.url
		return item

	def parse(self, response):
		"""Print the href of every sticky forum thread on the page."""
		# Bug fix: removed an unused ScrapyTestItem instance and dead
		# commented-out code duplicated from CSDNSpider.parse.
		hrefs = response.xpath('//*[contains(@id,"stickthread")]/a/@href').extract()
		for href in hrefs:
			# Parenthesized print works identically in Python 2 and 3
			# for a single argument.
			print(href)



