# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from xjnu.items import XjnuItem


class XjnuNewsSpider(CrawlSpider):
    """Crawl news.xjnu.edu.cn: follow paginated list pages and scrape articles.

    Rule 1 follows list/index pages (``/<id>/list<n>.htm``) without scraping.
    Rule 2 sends article pages (``/<id>/<id>/<slug>/page.htm``) to
    :meth:`parse_item`.
    """

    name = 'xjnu_news'
    allowed_domains = ['news.xjnu.edu.cn']
    start_urls = ['http://news.xjnu.edu.cn/']

    rules = (
        # Dots in the domain and before "htm" are escaped: an unescaped "."
        # matches ANY character, so the original patterns could match
        # unintended URLs (e.g. "listXhtm", "newsXxjnu...").
        Rule(LinkExtractor(allow=r'news\.xjnu\.edu\.cn/\d+/list\d*\.htm'), follow=True),
        Rule(LinkExtractor(allow=r'news\.xjnu\.edu\.cn/\d+/\d+/.+/page\.htm'), callback='parse_item'),
    )

    def parse_item(self, response):
        """Extract one news article from an article page.

        :param response: scrapy Response for a ``.../page.htm`` article URL.
        :returns: populated :class:`XjnuItem` with title, publish date,
            body text, and any attachment names/URLs.
        """
        item = XjnuItem()
        item['title'] = response.xpath('//title/text()').extract_first()
        # NOTE(review): the item field is named 'data' (likely a typo for
        # 'date' in xjnu.items) — kept as-is to match the item definition.
        item['data'] = response.css('.Article_PublishDate::text').extract_first()
        # Join all span text in the article body; strip non-breaking (\xa0)
        # and ideographic (\u3000) spaces common on Chinese pages.
        item['content'] = ''.join(
            response.css('.Article_Content').xpath('.//span/text()').extract()
        ).replace('\xa0', '').replace('\u3000', '')
        # Attachments are stored as the str() of the extracted lists,
        # preserving the original pipeline's expected format.
        item['annex_name'] = str(response.css('.Article_Content').xpath('.//a/text()').extract())
        item['annex_url'] = str(response.css('.Article_Content').xpath('.//a/@href').extract())

        return item
