# coding:utf-8
import scrapy
from scrapy.selector import Selector
from scrapy.spiders import Spider
from ..items import NewsItem
from ..spiders import utils_crawler

# Human-readable name of the news outlet ("Xinhua Net"); written into
# every scraped item's 'source' field by XinhuaSpider.parse_details.
source = u'新华网'


class XinhuaSpider(Spider):
    """Crawl Xinhua Net (新华网) daily-telegraph section pages and scrape articles.

    Starts from two section index pages, follows every on-site article link
    under ``news.xinhuanet.com/mrdx/``, and emits one ``NewsItem`` per article.
    """
    name = "xinhua"
    start_urls = [
        "http://www.xinhuanet.com/mrdx/plandsy.htm",
        "http://www.xinhuanet.com/mrdx/ttxw.htm"
    ]

    def parse(self, response):
        """Read the section label from the index page and follow article links.

        Yields one ``scrapy.Request`` per matching link, carrying the section
        label in ``meta['classify']`` for ``parse_details``.
        """
        # Scrapy responses expose .xpath directly; no explicit Selector needed.
        classify = response.xpath('//td[@class="fs14 red3 fb lh30"]/text()').extract()[0]

        for href in response.xpath('//a/@href').extract():
            # Follow only links into the /mrdx/ article area; `in` replaces
            # the non-idiomatic `.find(...) != -1`, and yielding directly
            # avoids building a throwaway intermediate list.
            if 'http://news.xinhuanet.com/mrdx/' in href:
                yield scrapy.Request(url=href, meta={'classify': classify}, callback=self.parse_details)

    def parse_details(self, response):
        """Scrape a single article page into a populated ``NewsItem``.

        Fields: title, href (page URL), time, content, source, image_urls.
        The time/content/image values are post-processed by ``utils_crawler``
        helpers (semantics defined elsewhere in the project).
        """
        item = NewsItem()

        item['title'] = response.xpath('//div[@id="Title"]/text()').extract()[0].strip()
        item['href'] = response.url
        # The date cell mixes CRLF-separated fragments; the second fragment
        # holds the timestamp text that deal_time normalizes.
        item['time'] = utils_crawler.deal_time(
            response.xpath('//td[@align="left"]/text()[1]').extract()[0].split('\r\n')[1])
        item['content'] = utils_crawler.deal_content(
            response.xpath('//div[@id="Content"]//text()').extract())
        item['source'] = source
        # Base URL for resolving relative image paths: the page URL up to and
        # including its last '/'. (response.url is already a str; the original
        # str() wrapper was redundant.)
        base_url = item['href'][:item['href'].rfind('/') + 1]
        item['image_urls'] = utils_crawler.deal_img(
            base_url, response.xpath('//div[@id="Content"]//img/@src').extract())

        return item
