import scrapy, time
from w3lib.html import remove_tags
from kyqb_collection_spider.spiders.common import CommonSpider
from kyqb_collection_spider.items import KyqbCollectionSpiderItem


class C4isrSpider(CommonSpider):
    """Spider for news articles on c4isrnet.com.

    Seeds ``start_urls`` with the site's paginated "load more" feed
    endpoint (7 items per page) and follows every article link not yet
    recorded in the Redis dedupe set keyed by the spider name.
    """

    name = 'c4isr'
    allowed_domains = ['c4isrnet.com']
    custom_settings = {
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }
    # Feed pages are addressed by Feed-Offset in steps of 7; 223 pages
    # covered the archive when this spider was written.  Built with a
    # comprehension so no loop variables leak into the class namespace.
    # (The URL template must stay inside the comprehension: class-scope
    # comprehensions cannot reference other class attributes.)
    start_urls = [
        ('https://www.c4isrnet.com/pb/api/v2/render/feature/global/mco-results-list-load-more?'
         'contentConfig=%7B%22_jge%22%3A%22content-feed%22%2C%22Feed-Parameter%22%3A%22%2Fhome%'
         '22%2C%22Feed-Limit%22%3A%227%22%2C%22Feed-Offset%22%3A{}%7D&customFields=%7B%'
         '22artworkPosition%22%3A%22right%22%2C%22offset%22%3A%220%22%2C%22'
         'commentsCountCivil%22%3A%22false%22%2C%22showAuthor%22%3A%22true%22%2C%22'
         'showDate%22%3A%22true%22%2C%22commentsCountDisqus%22%3A%22false%22%2C%22numItems%'
         '22%3A%225%22%2C%22formattingOption%22%3A%22relative%22%2C%22enabledLoadMore%22%3A%22'
         'true%22%2C%22showDescription%22%3A%22true%22%2C%22dateType%22%3A%22displayOnly'
         '%22%7D&service=content-feed').format(offset * 7)
        for offset in range(223)
    ]

    def parse(self, response):
        """Yield a Request per unseen article link in a feed page.

        Stops at the first link already present in the Redis set — the
        feed appears to be newest-first, so everything after an already
        collected URL has presumably been collected too (NOTE(review):
        confirm the feed ordering assumption).
        """
        for anchor in response.xpath('//h5/a'):
            # hrefs come back JSON-escaped (wrapped in \") — strip the
            # escapes before resolving against the page URL.
            raw = anchor.xpath('./@href').extract_first().replace('\\"', '')
            href = response.urljoin(raw)
            if self.redis.sismember(self.name, href):
                break  # incremental crawl: stop at the first known URL
            yield scrapy.Request(url=href, callback=self.article)

    def article(self, response):
        """Parse one article page into a KyqbCollectionSpiderItem.

        Any parsing failure is logged at debug level and the page is
        skipped (best-effort collection; no item is yielded on error).
        """
        try:
            item = KyqbCollectionSpiderItem()
            item['data_address'] = response.url
            item['website_name'] = 'C4ISR'
            item['spider_name'] = self.name
            item['data_source_type'] = '新闻媒体'
            item['data_type'] = 'NEWS'
            item['collection_mode'] = 'spider'
            item['collection_time'] = time.strftime('%Y-%m-%d %H:%M:%S')
            timestamp = response.xpath(
                '//*[@class="m-byline__publishDate m-iconText"]/text()'
            ).extract_first().strip()
            if 'ago' in timestamp:
                # Relative dates ("3 hours ago") collapse to today at
                # midnight — same result as the original strptime round-trip.
                item['publish_time'] = time.strftime('%Y-%m-%d') + ' 00:00:00'
            else:
                if ',' not in timestamp:
                    # The site omits the year on current-year articles.
                    # BUG FIX: the original hard-coded ', 2020', mis-dating
                    # every such article crawled after 2020 — use the
                    # current year instead.
                    timestamp = f"{timestamp}, {time.strftime('%Y')}"
                item['publish_time'] = time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.strptime(timestamp, "%B %d, %Y"))
            title = response.xpath('//*[@class="m-articleHeader"]/h1/text()').extract_first()
            item['title'] = remove_tags(title)
            item['abstract'] = None
            item['topic'] = response.xpath(
                '//*[@class="m-articleHeader"]/h2/a/text()').extract_first().strip()
            # Flatten every text node inside the article body paragraphs,
            # one stripped fragment per line.
            item['text'] = '\n'.join(
                remove_tags(node).strip()
                for p in response.xpath('//*[@id="article-content"]/p')
                for node in p.xpath('.//text()').extract())
            authors = [a.strip() for a in response.xpath(
                '//*[@class="m-byline__author"]/span/span//text()').extract()]
            # Multiple authors are joined with the '!@@!' separator.
            item['author'] = '!@@!'.join(authors) if authors else None
            # Deduplicate byline figure image URLs.
            item['image_urls'] = list(set(response.xpath(
                '//*[@class="m-byline__figure"]/img/@src').extract()))
            yield item
        except Exception as e:
            # NOTE(review): broad catch keeps the crawl going on malformed
            # pages; failures surface only at debug log level.
            self.logger.debug(f"采集错误:{response.url}\n{e}")
