import scrapy, time
from w3lib.html import remove_tags
from kyqb_collection_spider.spiders.common import CommonSpider
from kyqb_collection_spider.items import KyqbCollectionSpiderItem


class EsaScienceExplorationSpider(CommonSpider):
    """Spider for ESA Science & Exploration article archive.

    Walks the paginated archive at ``start_urls``, skips article URLs
    already recorded in the spider's Redis dedup set (``self.redis``,
    provided by :class:`CommonSpider`), and yields one
    :class:`KyqbCollectionSpiderItem` per article page.
    """

    name = 'esa_Science_Exploration'
    base_url = 'https://www.esa.int/Science_Exploration'
    start_urls = ['https://www.esa.int/Science_Exploration/(archive)/0']
    custom_settings = {
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }

    def parse(self, response, **kwargs):
        """Parse one archive listing page.

        Schedules an :meth:`article` request for each not-yet-seen grid
        item, then follows the "Next Page" link; logs completion when no
        further page exists.
        """
        grid_items = response.css(".grid-item")
        for grid_item in grid_items:
            href = grid_item.xpath('./a/@href').extract_first()
            # Redis set keyed by spider name acts as the cross-run dedup store.
            if href and not self.redis.sismember(self.name, href):
                url = response.urljoin(href)
                yield scrapy.Request(url, callback=self.article)
        next_page = response.xpath('//a[@title="Next Page"]/@href').extract_first()
        if next_page:
            yield scrapy.Request(response.urljoin(next_page), callback=self.parse)
        else:
            self.logger.info(f"{self.name}已爬完")

    def article(self, response):
        """Extract a single article page into a KyqbCollectionSpiderItem.

        Fields that cannot be extracted (missing or malformed publish
        date, missing title) are set to ``None`` rather than raising, so
        one bad page does not abort the crawl.
        """
        item = KyqbCollectionSpiderItem()
        item['data_address'] = response.url
        item['website_name'] = 'ESA'
        item['spider_name'] = self.name
        item['data_source_type'] = '科技网站'
        item['data_type'] = 'SCIENCE_EXPLORATION'
        item['collection_mode'] = 'spider'
        item['collection_time'] = time.strftime('%Y-%m-%d %H:%M:%S')
        # The date node may be absent (-> None) or padded with whitespace;
        # guard both so strptime cannot raise TypeError/ValueError here.
        timestamp = response.xpath('//*[@class="meta article__item"]/span/text()').extract_first()
        item['publish_time'] = None
        if timestamp:
            try:
                item['publish_time'] = time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.strptime(timestamp.strip(), '%d/%m/%Y'))
            except ValueError:
                self.logger.warning("unparseable publish date %r at %s", timestamp, response.url)
        title = response.xpath('//*[@class="heading heading--main article__item"]/text()').extract_first()
        item['title'] = title.strip() if title else None
        item['abstract'] = None
        item['topic'] = None
        # Body text: strip markup from each content block, join with newlines.
        containers = response.css('.article__block').extract()
        item['text'] = '\n'.join(remove_tags(container) for container in containers)
        item['author'] = None
        # Collect unique absolute image URLs; keep only jpg/png assets.
        urls = set()
        imgs = response.xpath('//article//img/@src').extract()
        for img in imgs:
            if '.jpg' in img or '.png' in img:
                urls.add(response.urljoin(img))
        item['image_urls'] = list(urls)
        yield item
