import scrapy
from scrapy.selector import Selector
from ..items import NewsItem
from ..spiders import utils_crawler

# Publisher name ("Reference News", cankaoxiaoxi.com) stamped onto every scraped item.
source = u'参考消息网'


class ReferenceNewsSpider(scrapy.Spider):
    """Spider for cankaoxiaoxi.com ("Reference News").

    Starts from several channel front pages (column, china, world, mil,
    finance, tw, science), collects article links from each page's
    "zuixin" (latest) box, and parses every article into a ``NewsItem``.
    """

    name = 'reference_news'
    start_urls = [
        "http://column.cankaoxiaoxi.com/",
        "http://china.cankaoxiaoxi.com/",
        "http://world.cankaoxiaoxi.com/",
        "http://mil.cankaoxiaoxi.com/",
        "http://finance.cankaoxiaoxi.com/",
        "http://tw.cankaoxiaoxi.com/",
        "http://science.cankaoxiaoxi.com/",
    ]

    def parse(self, response, **kwargs):
        """Follow article links found in the "zuixin" (latest news) box.

        Yields a ``scrapy.Request`` per article URL, handled by
        :meth:`parse_details`.
        """
        hrefs = Selector(response).xpath('//div[@id="zuixin"]//a/@href').extract()
        for href in hrefs:
            # Keep only article pages (.shtml) and skip photo-gallery links.
            if ".shtml" in href and "http://www.cankaoxiaoxi.com/photo" not in href:
                yield scrapy.Request(url=href, callback=self.parse_details)

    def parse_details(self, response):
        """Extract one ``NewsItem`` from an article page.

        Uses ``extract_first(default='')`` so a page missing the title or
        publish-time node yields an item with an empty field instead of
        crashing the spider with an IndexError.
        """
        sel = Selector(response)
        item = NewsItem()
        item['title'] = sel.xpath('//div[@class="bg-content"]/h1/text()').extract_first(default='').strip()
        item['href'] = response.url
        item['time'] = sel.xpath('//span[@id="pubtime_baidu"]/text()').extract_first(default='')
        item['content'] = utils_crawler.deal_content(sel.xpath('//div[@id="ctrlfscont"]//p/text()').extract())
        item['source'] = source
        item['image_urls'] = sel.xpath('//div[@id="ctrlfscont"]//img/@src').extract()
        return item
