from sina.items import SinaItem
import scrapy
from scrapy.http import Request

class AuthorspiderSpider(scrapy.Spider):
    """Crawl one Sina blog author's article list.

    Starting from the author's article-list page, follows pagination links
    (back into ``parse``) and individual article links (into ``parse_item``),
    yielding one populated ``SinaItem`` per article.
    """
    name = 'authorSpider'
    allowed_domains = ['blog.sina.com.cn']
    start_urls = ['http://blog.sina.com.cn/s/articlelist_1928056722_0_1.html']

    def parse(self, response):
        """Schedule pagination pages and article pages from a list page.

        Yields ``Request`` objects: pagination requests re-enter ``parse``
        (the default callback); article requests go to ``parse_item``.
        """
        page_xpath = "//ul[@class='SG_pages']/li/a/@href"
        # Fixed: original pattern began with four slashes ('////span'),
        # which is not the intended descendant axis abbreviation.
        article_xpath = "//span[@class='atc_title']/a/@href"

        for href in response.xpath(page_xpath).extract():
            yield Request(href)
        for href in response.xpath(article_xpath).extract():
            yield Request(href, callback=self.parse_item)

    def parse_item(self, response):
        """Extract a single article page into a ``SinaItem``.

        Uses ``extract_first('')`` so a non-matching selector yields an
        empty string instead of raising ``IndexError``.
        """
        self.logger.debug('parsing %s', response.url)

        item = SinaItem()
        item['title'] = response.xpath("//h2/text()").extract_first('')
        item['read'] = response.xpath(
            "//div[@class='articalInfo']//span[1]/text()").extract()
        # The <h2> id attribute carries a 2-character prefix before the
        # numeric article id — strip it (as the original code did).
        article_id = response.xpath("//h2/@id").extract_first('')
        item['id'] = article_id[2:]
        item['url'] = response.url
        # Author id is the tail of the blog-title link URL; offset 24 skips
        # the fixed 'http://blog.sina.com.cn/u/' style prefix — TODO confirm
        # against a live page, this slice is fragile.
        author_href = response.xpath(
            "//h1[@class='blogtitle']/a/@href").extract_first('')
        item['authorId'] = author_href[24:]
        # Concatenate the serialized child elements of the article body.
        item['content'] = "".join(
            response.xpath("//div[contains(@class,'articalContent')]/*").getall())
        return item