import scrapy
from scrapy.selector import Selector
from ..items import NewsItem
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from ..spiders import utils_crawler

# Human-readable site/source label attached to every scraped item
# (the name of the 71.cn lecture-hall section this spider covers).
source = u'宣家讲网大讲堂'

class XuanjiajiangJiangtangSpider(CrawlSpider):
    """Crawl the www.71.cn lecture-hall listings and extract articles as NewsItems.

    Fixes relative to the original implementation:
    - Inherit from ``CrawlSpider`` instead of ``scrapy.Spider``: plain
      spiders silently ignore the ``rules`` attribute, so the
      ``Rule``/``LinkExtractor`` machinery (and ``parse_item``) never ran.
    - ``parse_details`` populated a ``NewsItem`` but never yielded it, so
      no item ever reached the item pipelines; it now yields the item.
    - ``item['time']`` was built as ``bytes + str``
      (``.extract()[0].encode('utf-8') + ':00'``), which raises
      ``TypeError`` on Python 3; the value is now kept as text.
    - Detail-page links are resolved with ``response.urljoin`` in case the
      listing pages use relative hrefs.
    """
    name = 'xuanjiajiang_jiangtang'
    allowed_domains = ["www.71.cn"]
    start_urls = [
        'http://www.71.cn/sddjt/djt/2006nd/',
        # 'http://www.71.cn/sddjt/djt/2007nd/1.shtml',
        # 'http://www.71.cn/sddjt/djt/2008nd/1.shtml',
        # 'http://www.71.cn/sddjt/djt/2009nd/1.shtml',
        # 'http://www.71.cn/sddjt/djt/2010nd/1.shtml',
        # 'http://www.71.cn/sddjt/djt/2011nd/1.shtml',
        # 'http://www.71.cn/sddjt/djt/2012nd/1.shtml',
        # 'http://www.71.cn/sddjt/djt/2013nd/1.shtml',
        # 'http://www.71.cn/sddjt/djt/2014nd/1.shtml',
        # 'http://www.71.cn/sddjt/djt/2015nd/1.shtml',
        # 'http://www.71.cn/sddjt/djt/2016nd/',
        # 'http://www.71.cn/sddjt/djt/2017nd/',
    ]
    rules = [  # URL-crawling rules; CrawlSpider follows links matching `allow`
        # Crawl the listing pages for the selected year(s).
        Rule(LinkExtractor(allow=("/sddjt/djt/2006nd/")), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=("/sddjt/djt/2007nd/([1-3]+).shtml")), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=("/sddjt/djt/2008nd/([1-3]+).shtml")), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=("/sddjt/djt/2009nd/([1-3]+).shtml")), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=("/sddjt/djt/2010nd/([1-3]+).shtml")), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=("/sddjt/djt/2011nd/([1-3]+).shtml")), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=("/sddjt/djt/2012nd/([1-3]+).shtml")), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=("/sddjt/djt/2013nd/([1-3]+).shtml")), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=("/sddjt/djt/2014nd/([1-3]+).shtml")), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=("/sddjt/djt/2015nd/([1-3]+).shtml")), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=("/sddjt/djt/2016nd/")), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=("/sddjt/djt/2017nd/")), follow=True, callback='parse_item'),
    ]

    def parse_item(self, response):
        """Extract article-detail URLs from a listing page and follow each one."""
        sel = Selector(response)
        item_urls = sel.xpath('//li[@class="isimg"]/div[@class="articlelist_title"]//a/@href').extract()

        for item_url in item_urls:
            # Listing links may be relative; resolve them against the page URL.
            yield scrapy.Request(url=response.urljoin(item_url), callback=self.parse_details)

    def parse_content(self, response):
        """Return the cleaned article body text of a detail page."""
        sel = Selector(response)
        return utils_crawler.deal_content(
            sel.xpath('//div[@id="article-content"]//p/text()').extract())

    def parse_details(self, response):
        """Build and yield a NewsItem from an article detail page.

        Raises IndexError if the expected title/date nodes are missing —
        such pages do not match the article template and would otherwise
        produce empty items.
        """
        item = NewsItem()
        sel = Selector(response)

        item['title'] = sel.xpath('//div[@class="inner"]/h1/text()').extract()[0]
        item['href'] = response.url
        # Keep the date as text; append seconds so it reads 'YYYY-MM-DD HH:MM:SS'.
        item['time'] = sel.xpath('//span[@class="date"]/text()').extract()[0] + ':00'
        item['content'] = utils_crawler.deal_content(
            sel.xpath('//div[@id="article-content"]//p/text()').extract())
        item['source'] = source
        item['image_urls'] = []
        yield item  # original built the item but never emitted it

