import scrapy
from datetime import datetime
import time, os
from kyqb_collection_spider.items import KyqbCollectionSpiderItem
from kyqb_collection_spider.spiders.common import CommonSpider
from w3lib.html import remove_tags


class JaxaPressSpider(CommonSpider):
    """Spider for JAXA global press releases.

    Walks the yearly press-release index pages (2014-2020), follows each
    article link that has not been collected before (dedupe via a Redis set
    keyed by ``self.name``), and emits one ``KyqbCollectionSpiderItem`` per
    article.
    """

    name = 'jaxa_press'
    custom_settings = {
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }
    # Comprehension keeps the same URLs as the original loop while avoiding
    # leaking a loop variable into the class namespace.
    start_urls = [f"https://global.jaxa.jp/press/{year}/" for year in range(2014, 2021)]

    def parse(self, response):
        """Parse one yearly index page and schedule article requests.

        Stops the whole page as soon as an already-collected URL is hit.
        NOTE(review): this presumes the listing is ordered newest-first, so
        everything after the first seen URL was crawled on a previous run —
        confirm against the site before changing.
        """
        for release in response.css(".press_release"):
            for href in release.xpath(".//a/@href").extract():
                url = response.urljoin(href)
                if self.redis.sismember(self.name, url):
                    # Already collected: bail out of the entire page, matching
                    # the original incremental-crawl behavior.
                    return
                yield scrapy.Request(url=url, callback=self.acticle_parse)

    # NOTE: the (misspelled) method name is kept as-is — it is part of the
    # spider's callback interface.
    def acticle_parse(self, response):
        """Parse a single press-release article page into an item.

        Best-effort: any extraction failure (e.g. a URL whose basename does
        not start with a YYYYMMDD date, or a missing title/body element)
        drops this article only and is logged with a traceback.
        """
        try:
            item = KyqbCollectionSpiderItem()
            item['data_address'] = response.url
            item['website_name'] = 'JAXA'
            item['spider_name'] = self.name
            item['data_source_type'] = '科技网站'
            item['data_type'] = 'PRESS'
            item['collection_mode'] = 'spider'
            item['collection_time'] = datetime.now()
            # Article URLs carry the publish date as the first 8 chars of the
            # basename (YYYYMMDD...).  Use datetime directly instead of the
            # time.strftime/strptime roundtrip.
            item['publish_time'] = datetime.strptime(
                os.path.basename(response.url)[:8], "%Y%m%d"
            ).strftime('%Y-%m-%d %H:%M:%S')
            title = response.xpath('//*[@class="elem_heading_lv2_pad"]').extract_first()
            item['title'] = remove_tags(title)
            abstract = response.xpath('//*[@class="elem_paragraph"]//*[@class="right"]').extract()
            item['abstract'] = remove_tags(' '.join(abstract).strip())
            item['topic'] = None
            container = response.xpath('//div[@id="area_content"]').extract_first()
            item['text'] = remove_tags(container).strip()
            item['author'] = None
            # De-duplicate image URLs after resolving them against the page.
            item['image_urls'] = list({
                response.urljoin(src)
                for src in response.xpath('//div[@id="area_content"]//img/@src').extract()
            })
            yield item
        except Exception:
            # Skip the article but record the full traceback; the original
            # debug-level str(e) made failures effectively invisible.
            self.logger.exception("failed to parse article %s", response.url)
