#!/usr/bin/python
# -*- coding: gbk -*-
import time

from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from slyy.items import SlyyItem

class SlyySpider(BaseSpider):
    """Walk a 163.com blog by following each page's post link.

    ``parse`` runs on the start page (and every page scheduled back onto
    it): it extracts one post URL and schedules it twice -- once through
    ``parse_post`` to scrape the item, and once through the default
    callback (``parse`` again) to keep walking the chain.
    ``make_requests_from_url`` builds its requests with
    ``dont_filter=True``, so the duplicated URL is not dropped by the
    scheduler's dupefilter.
    """
    name = "slyy3"
    allowed_domains = ["txw1958.blog.163.com"]
    #start_urls = ["http://txw1958.blog.163.com/"]
    start_urls = ["http://txw1958.blog.163.com/blog/static/188725046201262492446552/"]

    # Anchor href of the post to follow from the current page.
    # NOTE(review): position-based XPath is brittle against any markup
    # change on the blog -- confirm against the live page if the crawl
    # stops yielding items.
    _POST_LINK_XPATH = ('''//html/body/div[3]/div[4]/div/div/div/div[2]'''
                        '''/div/div/div[2]/div/div/div/a/@href''')

    def parse(self, response):
        """Schedule the linked post for scraping and further crawling.

        Fixes vs. original: the identical XPath was evaluated twice
        (once for each request), and ``extract()[0]`` raised IndexError
        when no link matched (end of the chain), killing the spider.
        The crawl now terminates cleanly instead.

        :param response: the downloaded page.
        :returns: a list of up to two Requests (empty when no link found).
        """
        hxs = HtmlXPathSelector(response)

        links = hxs.select(self._POST_LINK_XPATH).extract()
        if not links:
            # No further post link on this page: end of the chain.
            return []
        post_url = links[0]

        requests = []
        # Scrape the linked post's heading ...
        requests.append(
            self.make_requests_from_url(post_url).replace(callback=self.parse_post))
        # ... and revisit it with the default callback to continue the walk.
        requests.append(self.make_requests_from_url(post_url))
        return requests

    def parse_post(self, response):
        """Scrape a single post: emit its URL and heading text.

        :param response: the downloaded post page.
        :returns: a populated ``SlyyItem``.
        """
        hxs = HtmlXPathSelector(response)
        # Heading <span> inside the post title block; same brittleness
        # caveat as _POST_LINK_XPATH.
        h3 = hxs.select('''//*[@id="-3"]/div[2]/div[1]/div/div[2]/div/div[2]/div[1]/div[1]/div/div/h3/span[1]/text()''').extract()[0]
        # print with parentheses is valid (and identical) in Python 2 and 3.
        print(h3)
        item = SlyyItem()
        item['url'] = response.url
        item['head'] = h3
        return item
