from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
import scrapy, time, re
from w3lib.html import remove_tags
from kyqb_collection_spider.spiders.common import CommonCrawlSpider
from kyqb_collection_spider.items import KyqbCollectionSpiderItem



class SpacenewsfeedSpider(CommonCrawlSpider):
    """Incremental crawler for spacenewsfeed.com news articles.

    Follows the "next page" pagination links, collects article links from each
    listing page, and parses every article not yet recorded in Redis into a
    ``KyqbCollectionSpiderItem``.
    """

    name = 'spacenewsfeed'
    allowed_domains = ['spacenewsfeed.com']
    start_urls = ['https://spacenewsfeed.com/index.php']

    rules = (
        Rule(LinkExtractor(restrict_xpaths=('//*[@class="pagination-next"]/a')), callback='parse_item', follow=True),
    )

    custom_settings = {
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }

    # Leading paragraph format: "(12 August 2021 - Tag) abstract text..."
    # Groups: (1) date string, (2) topic tags, (3) abstract.
    _META_RE = re.compile(r'\((\d+ \D+ \d+) - (.*?)\) (.*)')

    def parse_item(self, response):
        """Yield a Request for each unseen article on a listing page.

        Articles are listed newest-first, so the first already-seen link
        (tracked via a Redis set keyed by spider name) means the rest of the
        page was crawled before — stop iterating early (incremental crawl).
        """
        a_list = response.xpath('//*[@class="items-leading clearfix"]/div')
        for a in a_list:
            href = response.urljoin(a.xpath('.//a/@href').extract_first())
            if self.redis.sismember(self.name, href):
                return None
            yield scrapy.Request(url=href, callback=self.article)

    def _apply_meta(self, item, key):
        """Fill publish_time / topic / abstract on *item* from leading text.

        *key* is the raw leading-paragraph text (may be None). All three
        fields default to None when the text is missing or does not match
        the expected "(date - tags) abstract" pattern.
        """
        item['publish_time'] = None
        item['topic'] = None
        item['abstract'] = None
        if not key:
            return
        for timestamp, post_tags, abstract in self._META_RE.findall(key):
            try:
                # NOTE(review): "%d %B %Y" parses English month names only
                # under an English locale — presumably fine for this site.
                item['publish_time'] = time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.strptime(timestamp, '%d %B %Y'))
            except ValueError:
                item['publish_time'] = None
            item['topic'] = post_tags
            item['abstract'] = abstract

    def article(self, response):
        """Parse one article page into a KyqbCollectionSpiderItem.

        Any unexpected parsing error is logged (debug) and the item is
        dropped rather than crashing the crawl.
        """
        try:
            item = KyqbCollectionSpiderItem()
            item['data_address'] = response.url
            item['website_name'] = 'SPACENEWSFEED'
            item['spider_name'] = self.name
            item['data_source_type'] = '新闻媒体'
            item['data_type'] = 'NEWS'
            item['collection_mode'] = 'spider'
            item['collection_time'] = time.strftime('%Y-%m-%d %H:%M:%S')

            # Metadata lives in the first paragraph — usually inside a
            # <strong>, otherwise in the paragraph's plain text.
            key = response.xpath('//*[@itemprop="articleBody"]/p[1]/strong/text()').extract_first()
            if not key:
                key = response.xpath('//*[@itemprop="articleBody"]/p[1]/text()').extract_first()
            self._apply_meta(item, key)

            title = response.xpath('//*[@itemprop="articleBody"]/h1//text()').extract_first()
            item['title'] = remove_tags(title)

            texts = []
            for p in response.xpath('//*[@itemprop="articleBody"]/p'):
                for container in p.xpath('./text()').extract():
                    texts.append(remove_tags(container).strip())
            item['text'] = '\n'.join(texts)
            item['author'] = None

            image_urls = set()
            for p in response.xpath('//*[@itemprop="articleBody"]/p'):
                src = p.xpath('.//img/@src').extract_first()
                if not src:
                    # BUGFIX: urljoin(None) raised TypeError and the blanket
                    # except silently dropped the whole item.
                    continue
                url = response.urljoin(src)
                # Substring (not endswith) match preserves URLs with query
                # strings, e.g. ".../photo.jpg?w=640".
                if any(ext in url for ext in ('.jpg', '.jpeg', '.png')):
                    image_urls.add(url)
            item['image_urls'] = list(image_urls)
            yield item
        except Exception as e:
            self.logger.debug(f"采集错误:{response.url}\n{e}")
