#-*- encoding: UTF-8 -*-
#----------------------------import------------------------------
import scrapy
import urlparse
import re
from bdbk.items import BdbkItem
from scrapy import Request
from scrapy import Selector
#---------------------------------------------------------------------

class bdbkSpider(scrapy.Spider):
    """Crawl qiushibaike.com listing pages: follow each joke's detail link,
    scrape its content and author into a BdbkItem, and paginate via the
    "next page" link.
    """
    name = "whois"
    start_urls = ["http://www.qiushibaike.com"]

    def parse(self, response):
        """Parse one listing page.

        Yields a Request (-> parse_content) for every joke entry on the page,
        then a Request (-> parse) for the next listing page when one exists.
        """
        # BUG FIX: the original XPath '/@href' is an absolute path from the
        # document root and matches nothing; the relative form './@href' is
        # required. The original also passed the raw SelectorList straight to
        # urljoin, which needs a string — extract_first() pulls it out.
        for duanzi in response.xpath('//div[@class="article block untagged mb15"]/a[1]'):
            the_href = duanzi.xpath('./@href').extract_first()
            if the_href:
                # response.urljoin resolves relative hrefs against the page's
                # own URL, replacing the Python-2-only urlparse.urljoin call
                # and the hard-coded base constant.
                yield scrapy.Request(response.urljoin(the_href),
                                     callback=self.parse_content)

        # li[8] is assumed to hold the pagination "next" anchor on this site's
        # markup — TODO confirm against the live page structure.
        next_pages = response.xpath('//*[@id="content-left"]/ul/li[8]/a')
        if next_pages:
            next_href = next_pages.xpath('./@href').extract_first()
            if next_href:
                yield scrapy.Request(response.urljoin(next_href),
                                     callback=self.parse)

    def parse_content(self, response):
        """Scrape a single joke page into a BdbkItem.

        Fields: 'link' (page URL), 'content' (joke body markup), 'name'
        (author heading). extract() keeps the original list-of-strings shape
        for the scraped fields.
        """
        item = BdbkItem()
        item['link'] = response.url
        item['content'] = response.xpath('//*[@id="single-next-link"]/div').extract()
        item['name'] = response.xpath('//div[@class="author clearfix"]/a[2]/h2').extract()
        yield item
