# -*- coding: utf-8 -*-
import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from ifeng.items import IfengItem

class SpSpider(scrapy.Spider):
    """Crawl the ifeng.com tech-news listing page and yield IfengItem records.

    NOTE(review): the ``rules`` attribute has no effect on a plain
    ``scrapy.Spider`` — Rule processing only happens in ``CrawlSpider``,
    and this class overrides ``parse()`` directly. It is kept unchanged
    for backward compatibility; remove it (or switch the base class)
    once confirmed unused.
    """
    name = 'sp'
    allowed_domains = ['ifeng.com']
    start_urls = ['http://tech.ifeng.com/listpage/800/20180302/1/rtlist.shtml']
    # Date parts and URL fragments apparently intended for building further
    # listing-page URLs; parse() does not use them — kept for compatibility.
    y = 2018
    m = 3
    d = 1
    url = "http://tech.ifeng.com/listpage/800/"
    end = "1/rtlist.shtml"
    rules = (
        Rule(SgmlLinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
    )

    def parse(self, response):
        """Yield one IfengItem per article entry on the listing page.

        :param response: the listing-page response for a start URL.
        """
        for each in response.xpath("//div[@class='zheng_list pl10 box']"):
            item = IfengItem()
            item["category"] = "tech"
            # The title lives under <h1> or <h2> depending on the entry.
            # extract_first() returns None on a missing node instead of the
            # IndexError that .extract()[0] raised in the original.
            title = each.xpath("./h1/a/@title").extract_first()
            if title is None:
                title = each.xpath("./h2/a/@title").extract_first()
            item["title"] = title
            item["content"] = "xj"  # placeholder values preserved from original
            item["style"] = 0
            item["auther"] = "xj"
            item["time"] = each.xpath("./div[1]/span/text()").extract_first()
            yield item

    def parse_item(self, response):
        """Rule callback; returns a (currently empty) IfengItem.

        Fixes: the original instantiated the undefined name
        ``SpiderJrttItem`` (NameError at runtime) — corrected to the
        imported ``IfengItem`` — and used the long-deprecated
        ``HtmlXPathSelector``; ``response.xpath`` is the supported API.
        """
        item = IfengItem()
        #item['domain_id'] = response.xpath('//input[@id="sid"]/@value').extract()
        #item['name'] = response.xpath('//div[@id="name"]').extract()
        #item['description'] = response.xpath('//div[@id="description"]').extract()
        return item
