import scrapy, time
from w3lib.html import remove_tags
from kyqb_collection_spider.spiders.common import CommonSpider
from kyqb_collection_spider.items import KyqbCollectionSpiderItem


class CaltechNewsSpider(CommonSpider):
    """Spider for Caltech "research news" articles (www.caltech.edu).

    Walks every page of the paginated news listing, follows each article
    link that is not already recorded in Redis (de-duplication), and
    yields one ``KyqbCollectionSpiderItem`` per article.
    """

    name = 'caltech_news'
    custom_settings = {
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }
    # Listing-page URL template; 126 pages existed when the spider was written.
    base_url = 'https://www.caltech.edu/about/news?ordering=date&category=research+news&submit=Search&p={page_num}'
    start_urls = []
    for _page in range(1, 127):  # maximum page count: 127 (exclusive bound)
        start_urls.append(base_url.format(page_num=_page))

    def parse(self, response):
        """Parse one listing page and request every unseen article.

        An article URL is skipped when it is already a member of the Redis
        set named after this spider (presumably populated downstream by the
        pipeline — TODO confirm).
        """
        for teaser in response.css('.news-article-list__row .article-teaser'):
            url = response.urljoin(teaser.xpath('.//a/@href').extract_first())
            if not self.redis.sismember(self.name, url):
                yield scrapy.Request(url=url, callback=self.article)

    def article(self, response):
        """Extract a single news article into a ``KyqbCollectionSpiderItem``.

        Best-effort collection: any extraction failure (e.g. a missing date
        or title node raising on ``None``) drops the article and is logged
        at debug level instead of crashing the crawl.
        """
        try:
            item = KyqbCollectionSpiderItem()
            item['data_address'] = response.url
            item['website_name'] = 'CALTECH'
            item['spider_name'] = self.name
            item['data_source_type'] = '科技网站'
            item['data_type'] = 'NEWS'
            item['collection_mode'] = 'spider'
            item['collection_time'] = time.strftime('%Y-%m-%d %H:%M:%S')
            # NOTE(review): "%B" is locale-dependent — assumes an English
            # (C/en_US) locale so month names like "January" parse; confirm
            # the deployment locale.
            published = response.xpath(
                '//*[@class="publish-date-block__date"]/text()').extract_first().strip()
            item['publish_time'] = time.strftime(
                '%Y-%m-%d %H:%M:%S', time.strptime(published, "%B %d, %Y"))
            # Two header layouts are in use on the site; either selector wins.
            title = response.xpath(
                '//*[@class="news-hero-header-block__info__title"]/text()|'
                '//*[@class="simple-news-header-block__title mb-3"]/text()').extract_first()
            item['title'] = remove_tags(title)
            item['abstract'] = None
            tags = [t.strip() for t in response.css('.news-info-block__tags a::text').extract()]
            item['topic'] = '!@@!'.join(tags)
            # Every text node of the article body, in document order,
            # one line per node.
            item['text'] = '\n'.join(
                remove_tags(node).strip()
                for node in response.xpath(
                    '//div[@class="airspace-rich-text"]//text()').extract())
            item['author'] = response.css(
                '.news-writer-contact-block__contact__name--writer::text').extract_first()
            # Keep only raster images. The match is case-insensitive so that
            # ".JPG"/".PNG" (and ".jpeg") URLs are no longer dropped; a set
            # de-duplicates before handing off to the images pipeline.
            image_urls = {
                response.urljoin(src)
                for src in response.xpath(
                    '//div[@class="block-NewsHeroHeaderBlock"]//img/@src'
                    '|//div[@class="block-SimpleNewsHeaderBlock"]//img/@src'
                    '|//div[@class="airspace-rich-text"]//img/@src').extract()
                if any(ext in src.lower() for ext in ('.jpg', '.jpeg', '.png'))
            }
            item['image_urls'] = list(image_urls)
            yield item
        except Exception as e:
            self.logger.debug(f"采集错误:{response.url}\n{e}")
