# -*- coding: utf-8 -*-
import scrapy

from scrapy.contrib.spiders import CrawlSpider,Rule
from scrapy.contrib.linkextractors import LinkExtractor

from scrapy.http import Request

from movie.items import Blog

class BlogSpider(scrapy.Spider):
    """Crawl blog listing pages on www.liqingbo.cn and scrape each post.

    Listing pages live at http://www.liqingbo.cn/category/1/<k>; the spider
    walks k downward (via ``next_url``) and follows every post link found on
    each listing page, extracting a ``Blog`` item per post.
    """

    # Pagination counter. Decremented before use in next_url(), so listing
    # pages /category/1/45 down to /category/1/1 are visited.
    k = 46
    name = "blog"
    allowed_domains = ["www.liqingbo.cn"]
    start_urls = [
        "http://www.liqingbo.cn/category/1/10",
    ]

    def parse(self, response):
        """Parse a listing page: follow each post link, then the next listing page.

        Yields a Request per post (handled by ``detailParse``) and, while
        pagination is not exhausted, a Request for the next listing page.
        """
        sections = response.selector.xpath('//div[@id="content"]/section//section')
        for site in sections:
            hrefs = site.xpath('div[@class="post-title"]/h5/a/@href').extract()
            # Guard: a section without a post link would otherwise raise
            # IndexError on hrefs[0]; skip it instead.
            if hrefs:
                yield Request(hrefs[0], callback=self.detailParse)

        next_page = self.next_url()
        # next_url() returns None once the counter reaches 0; Request(None)
        # would raise, so only follow a real URL.
        if next_page is not None:
            yield Request(next_page, callback=self.parse)

    def detailParse(self, response):
        """Extract Blog items (title and post date) from a post page.

        Returns a list of populated ``Blog`` items; sections that do not
        match the expected layout are skipped rather than crashing.
        """
        sections = response.selector.xpath('//div[@id="content"]/section//section')
        items = []
        for site in sections:
            # Selector.select() is a deprecated alias for .xpath().
            titles = site.xpath('div[@class="post-title"]/h3/text()').extract()
            date_spans = site.xpath('div[@class="post-date"]//span')
            # Guard against missing title or fewer than 4 date spans
            # (original code hard-indexed [0] and [3] and could IndexError).
            if not titles or len(date_spans) < 4:
                continue
            item = Blog()
            item['title'] = titles[0]
            item['time'] = date_spans[3].xpath('text()').extract()[0]
            items.append(item)
        return items

    def next_url(self):
        """Return the next listing-page URL, or None when pagination is done.

        Decrements the shared counter ``self.k`` as a side effect.
        """
        self.k -= 1
        if self.k == 0:
            return None
        return "http://www.liqingbo.cn/category/1/" + str(self.k)