from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
import scrapy, time
from w3lib.html import remove_tags
from kyqb_collection_spider.spiders.common import CommonCrawlSpider
from kyqb_collection_spider.items import KyqbCollectionSpiderItem

class PrincetonSpider(CommonCrawlSpider):
    """Incremental crawler for Princeton University news (https://www.princeton.edu/news).

    Follows the listing's "next page" pager, visits every article URL not
    yet recorded in the Redis de-duplication set, and yields a populated
    ``KyqbCollectionSpiderItem`` per article.
    """

    name = 'princeton'
    start_urls = ['https://www.princeton.edu/news']
    custom_settings = {
        # Download article images first (priority 5), then persist the item (600).
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }

    rules = (
        # Paginate through the listing via the "next" pager link; each
        # listing page is handled by parse_item.
        Rule(LinkExtractor(restrict_xpaths=('//*[@class="pager__item pager__item--next"]')),
             callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Extract article links from one listing page and request each new one.

        Stops at the first link already present in the Redis set: listings
        appear to be newest-first, so everything after a seen link was
        collected by a previous run.
        """
        for entry in response.xpath('//*[@id="block-tony-content"]/div/div/div/div'):
            href = entry.xpath('.//a/@href').extract_first()
            if not href:
                # Defensive: skip listing entries without a link instead of
                # crashing on urljoin(None).
                continue
            url = response.urljoin(href)
            if self.redis.sismember(self.name, url):
                break  # already collected — remaining entries are older, stop here
            yield scrapy.Request(url=url, callback=self.article)

    def article(self, response):
        """Parse a single article page into a ``KyqbCollectionSpiderItem``.

        Best-effort: any extraction or date-parsing failure is logged at
        debug level and the article is skipped rather than aborting the crawl.
        """
        try:
            item = KyqbCollectionSpiderItem()
            item['data_address'] = response.url
            item['website_name'] = 'PRINCETON'
            item['spider_name'] = self.name
            item['data_source_type'] = '科技网站'
            item['data_type'] = 'NEWS'
            item['collection_mode'] = 'spider'
            item['collection_time'] = time.strftime('%Y-%m-%d %H:%M:%S')
            # Normalize the site's date string (e.g. "Sept. 3, 2021, 2:30 p.m."
            # or "... noon") into tokens strptime's %b/%I/%p directives accept.
            timestamp = response.xpath('//*[@class="news-top"]/div[2]/text()').extract_first().strip()\
                .replace('noon', '12:00 PM').replace('p.m.', 'PM').replace('a.m.', 'AM').replace('.', '')\
                .replace('Sept', 'Sep').replace('July', 'Jul').replace('June', 'Jun').replace('April', 'Apr')\
                .replace('March', 'Mar')
            # Two observed shapes: with minutes ("2:30 PM") or hour only ("2 PM").
            fmt = "%b %d, %Y %I:%M %p" if ':' in timestamp else "%b %d, %Y %I %p"
            item['publish_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(timestamp, fmt))
            title = response.xpath('//*[@class="news-top"]/h1/span/text()').extract_first()
            item['title'] = remove_tags(title)
            # The first paragraph doubles as the abstract.
            abstract_parts = response.xpath('//*[@class="node__content"]/div/p[1]//text()').extract()
            item['abstract'] = ''.join(abstract_parts)
            item['topic'] = None
            # Body text: every paragraph/article node after the abstract paragraph.
            nodes = response.xpath('//*[@class="node__content"]/div/p|//*[@class="node__content"]/div/article')
            texts = []
            for node in nodes[1:]:
                for fragment in node.xpath('.//text()').extract():
                    texts.append(remove_tags(fragment))
            item['text'] = '\n'.join(texts)
            author = response.xpath('//*[@class="news-top"]/div[1]//text()').extract()
            item['author'] = ''.join(author)
            # urljoin handles relative, root-relative AND already-absolute src
            # values; the previous plain concatenation produced broken URLs
            # for absolute/CDN image sources.
            image_urls = {
                response.urljoin(src)
                for src in response.xpath('//*[@id="block-tony-content"]/article//img/@src').extract()
            }
            item['image_urls'] = list(image_urls)
            yield item
        except Exception as e:
            # Best-effort collection: log the failing URL and skip this article.
            self.logger.debug(f"采集错误:{response.url}\n{e}")