# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider

class SinaCrawlSpider(RedisCrawlSpider):
    """Distributed crawl spider for news.sina.com.cn.

    Unlike a plain CrawlSpider, start URLs are not hard-coded: workers
    block on the Redis list named by ``redis_key`` and begin crawling
    when a seed URL (e.g. http://news.sina.com.cn/guide/) is pushed to it,
    allowing multiple spider processes to share one crawl frontier.
    """
    name = 'sina_crawl'
    allowed_domains = ['sina.com.cn']
    # Seed URL is supplied via Redis instead of start_urls:
    # start_urls = ['http://news.sina.com.cn/guide/']
    redis_key = "sina"
    rules = (
        # Match top-level category links on the guide page
        Rule(LinkExtractor(restrict_xpaths=("//div[@class='clearfix']/h3",)), callback="parse1", follow=True),

        # Match sub-category links
        Rule(LinkExtractor(restrict_xpaths=("//div[@class='clearfix']/ul/li",)), callback="parse2", follow=True),

        # Match links to individual news articles (follow defaults to False
        # because a callback is set — article pages are leaves)
        Rule(LinkExtractor(restrict_xpaths=("//div[contains(@class,'news-item')]/h2/a",)), callback="parse_item"),
    )

    def parse1(self, response):
        """Callback for top-level category pages; intentionally a no-op
        (the Rule's follow=True does the useful work)."""
        pass

    def parse2(self, response):
        """Callback for sub-category pages; currently only logs the URL
        so crawl progress is visible."""
        print(response.url)

    def parse_item(self, response):
        """Extract the article title from a news page and yield it as an item.

        Fix: the original built the item dict but only printed it and never
        yielded, so no items ever reached Scrapy's item pipelines.
        """
        item = {}
        # NOTE(review): title may be None if the page lacks a
        # 'second-title' div — downstream pipelines should tolerate that.
        item["title"] = response.xpath("//div[@class='second-title']/text()").extract_first()
        yield item
