import scrapy, time
from datetime import datetime
from w3lib.html import remove_tags
from kyqb_collection_spider.spiders.common import CommonSpider
from kyqb_collection_spider.items import KyqbCollectionSpiderItem


class RoscosmosNewsSpider(CommonSpider):
    """Incremental news spider for en.roscosmos.ru (English ROSCOSMOS site).

    Crawl flow: monthly-archive index -> per-month news lists -> article pages.
    The Redis set named after the spider (``self.name``) de-duplicates URLs
    across runs, so only not-yet-seen archives/articles are fetched.
    """

    name = 'roscosmos_news'
    custom_settings = {
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }
    start_urls = ['http://en.roscosmos.ru/102/201301/']

    def parse(self, response, **kwargs):
        """Collect monthly-archive links and follow only those not seen before."""
        hrefs = response.xpath('//*[@class="newsarchive"]//a/@href').extract()
        # Mark the entry page itself as visited so it is never re-queued.
        self.redis.sadd(self.name, "http://en.roscosmos.ru/102/201301/")
        for href in hrefs:
            url = response.urljoin(href)
            # Redis SADD returns 1 only for a new member — check-and-mark in one call.
            if self.redis.sadd(self.name, url):
                yield scrapy.Request(url, callback=self.yearslist)

    def yearslist(self, response):
        """Follow article links on one monthly news-list page.

        Stops at the first already-seen URL; presumably the list is ordered
        newest-first, so everything after a known article was crawled in a
        previous run. NOTE(review): confirm the ordering assumption.
        """
        newslist = response.xpath('//*[@class="newslist"]//a/@href').extract()
        for href in newslist:
            url = response.urljoin(href)
            if self.redis.sismember(self.name, url):
                return
            yield scrapy.Request(url=url, callback=self.acticle_parse)

    # Method name keeps the original "acticle" typo: it is part of the class's
    # public interface (referenced as a callback) and renaming could break callers.
    def acticle_parse(self, response):
        """Parse one article page into a KyqbCollectionSpiderItem.

        Fixes over the original: missing date / title / content nodes no longer
        raise AttributeError (extract_first()/get() can return None); the
        affected fields are set to None instead so the item is still yielded.
        """
        item = KyqbCollectionSpiderItem()
        item['data_address'] = response.url
        item['website_name'] = 'ROSCOSMOS'
        item['spider_name'] = self.name
        item['data_source_type'] = '新闻媒体'
        item['data_type'] = 'NEWS'
        item['collection_mode'] = 'spider'
        item['collection_time'] = datetime.now()
        # Site format example: "January 30, 2013, 12:00 GMT". %B is locale-
        # dependent; assumes an English (C/POSIX) locale — TODO confirm.
        date = response.xpath('//*[@class="date"]/text()').get()
        if date:
            item['publish_time'] = time.strftime(
                '%Y-%m-%d %H:%M:%S',
                time.strptime(date.strip(), "%B %d, %Y, %H:%M GMT"))
        else:
            item['publish_time'] = None
        title = response.xpath('//h2').extract_first()
        item['title'] = remove_tags(title).strip() if title else None
        item['abstract'] = None
        item['topic'] = None
        container = response.xpath('//*[@class="content"]').extract_first()
        item['text'] = remove_tags(container).strip() if container else None
        item['author'] = None
        # De-duplicate image URLs: the same image may appear several times in the body.
        srcs = response.xpath('//*[@class="content"]//img/@src').extract()
        item['image_urls'] = list({response.urljoin(src) for src in srcs})
        yield item
