import scrapy, time
from w3lib.html import remove_tags
from kyqb_collection_spider.spiders.common import CommonSpider
from kyqb_collection_spider.items import KyqbCollectionSpiderItem

class EthzSpider(CommonSpider):
    """Spider for ETH Zurich news articles (ethz.ch news feed).

    Crawls the paginated news feed, skips URLs already recorded in Redis
    (set keyed by the spider name, managed by CommonSpider/pipelines),
    and parses each article page into a KyqbCollectionSpiderItem.
    """
    name = 'ethz'
    start_urls = ['https://ethz.ch/en/news-and-events/eth-news/_jcr_content/par/newsfeed_0.page.220.html']
    custom_settings = {
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }

    def parse(self, response):
        """Parse the news listing page and follow each unseen article link.

        Skips entries with no href and entries whose URL is already a
        member of the Redis dedup set ``self.name``.
        """
        div_list = response.xpath('//*[@class="newsList last-child"]/div')
        for div in div_list:
            href = div.xpath('.//h2/a/@href').extract_first()
            # Guard against missing links; urljoin keeps relative hrefs valid.
            if not href:
                continue
            if not self.redis.sismember(self.name, href):
                yield scrapy.Request(url=response.urljoin(href), callback=self.article)

    def article(self, response):
        """Extract a single article page into a KyqbCollectionSpiderItem.

        Any extraction failure (missing node, unexpected date format, ...)
        is logged at debug level and the item is dropped — best-effort
        collection, consistent with the broad try/except boundary.
        """
        try:
            item = KyqbCollectionSpiderItem()
            item['data_address'] = response.url
            item['website_name'] = 'ETH'
            item['spider_name'] = self.name
            item['data_source_type'] = '科技网站'
            item['data_type'] = 'NEWS'
            item['collection_mode'] = 'spider'
            item['collection_time'] = time.strftime('%Y-%m-%d %H:%M:%S')

            # Publish date appears as e.g. "| 06.03.2023" — strip the pipe,
            # then normalize dd.mm.yyyy to the pipeline's datetime format.
            timestamp = response.xpath('//*[@class="info"]/p[1]/text()').extract_first().replace('|', '').strip()
            item['publish_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(timestamp, "%d.%m.%Y"))

            title = response.xpath('//*[@class="newsArticle"]/h1/text()').extract_first()
            item['title'] = remove_tags(title)

            # Abstract paragraphs (avoid shadowing the builtin `abs`).
            abstract_parts = response.xpath('//*[@class="newsArticle"]/div[2]/p/text()').extract()
            item['abstract'] = ''.join(abstract_parts)

            # Topic tags joined with the pipeline's '!@@!' separator.
            post_tags = [i.strip() for i in response.xpath('//*[@class="info"]/p[1]/a/text()').extract()]
            item['topic'] = '!@@!'.join(post_tags) if post_tags else None

            # Body text: all text nodes of the third article div, one per line.
            containers = response.xpath('//*[@class="newsArticle"]/div[3]//text()').extract()
            item['text'] = '\n'.join(remove_tags(container).strip() for container in containers)

            author = response.xpath('//*[@class="info"]/p[2]/a/text()').extract_first()
            item['author'] = remove_tags(author).strip() if author else None

            # Collect distinct .jpg image URLs, resolved to absolute form.
            image_urls = set()
            for url in response.xpath('//*[@class="newsArticle"]/div[3]//img/@src').extract():
                if '.jpg' in url:
                    image_urls.add(response.urljoin(url))
            item['image_urls'] = list(image_urls)

            yield item
        except Exception as e:
            self.logger.debug(f"采集错误:{response.url}\n{e}")
