# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from gov.items import GovItem
from gov.public import sub
class GxSpider(CrawlSpider):
    """Crawl spider for news articles on www.xinjiang.gov.cn."""
    name = 'xj'
    allowed_domains = ['www.xinjiang.gov.cn']
    start_urls = ['http://www.xinjiang.gov.cn']

    rules = (
        # NOTE(review): the first rule already matches every '.html' URL, so
        # the second (dated-path) rule never receives a link — CrawlSpider
        # hands each extracted link to the first rule whose extractor matches.
        # Kept for now to avoid changing crawl behavior; consider removing it
        # or reordering with the more specific pattern first.
        Rule(LinkExtractor(allow=r'\.html', unique=True), follow=True, callback='parse_item'),
        Rule(LinkExtractor(allow=r'\d{4}/\d{2}/\d{2}/.*\.html', unique=True), callback='parse_item'),
    )

    def parse_item(self, response):
        """Extract one article from a response.

        Yields a GovItem with 'title', 'date', 'source', 'content' and 'url'
        when the page contains the news container div; pages without that
        container (index/listing pages) yield nothing.
        """
        news_cont = response.xpath('//div[contains(@class,"news_cont_1024")]')
        if not news_cont:
            return

        title = news_cont[0].xpath('.//div[@class="title"]')[0]
        item = GovItem()
        item['title'] = sub(title.xpath('./h1/text()').extract_first())

        # default='' guards against AttributeError: extract_first() returns
        # None when the span text node is missing, and the original code
        # chained .replace() directly onto it.
        date = title.xpath('.//div[@class="t_left"]/span[1]/text()').extract_first(default='')
        item['date'] = date.replace('时间：', '').strip()
        source = title.xpath('.//div[@class="t_left"]/span[2]/text()').extract_first(default='')
        item['source'] = source.replace('来源：', '').strip()

        item['content'] = sub(''.join(
            news_cont.xpath('.//div[@id="news_content"]//text()').extract()
        )).replace('&nbsp;', '').strip()
        if not item['content']:
            # Fallback selector when the #news_content div yields no text.
            item['content'] = sub(''.join(
                news_cont.xpath('.//div[@class="cont"]//text()').extract()
            )).replace('&nbsp;', '').strip()

        item['url'] = response.url
        yield item