from datetime import datetime
import scrapy, time, re
from w3lib.url import urljoin
from w3lib.html import remove_tags
from kyqb_collection_spider.spiders.common import CommonSpider
from kyqb_collection_spider.items import KyqbCollectionSpiderItem


class NasaNewsSpider(CommonSpider):
    """Crawl NASA news releases via the site's ubernode search API.

    Flow: query the JSON search API page by page -> for each hit, request
    the article URL (which returns a JS shim containing a ``forcedRoute``
    redirect) -> fetch the redirect target's JSON document and build an
    item from it.  A Redis set keyed by the spider name tracks already
    collected URLs so an incremental run stops once it reaches the
    previous run's position.
    """
    name = 'nasa_news'
    # Human-facing listing page this spider corresponds to:
    # www.nasa.gov/news/releases/latest/index.html
    start_urls = ['http://www.nasa.gov/news/releases/latest/index.html']

    custom_settings = {
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }

    # Hits requested per API page; pagination advances by this amount.
    PAGE_SIZE = 24

    def start_requests(self):
        """Issue the first search-API request (offset 0 of the listing).

        :return: generator yielding the initial API request
        """
        self.base_url = 'https://www.nasa.gov/'
        self.api_url = 'https://www.nasa.gov/api/2/ubernode/_search'
        self.params = {
            'size': str(self.PAGE_SIZE),
            'from': '0',
            'sort': 'promo-date-time:desc',
            'q': '((ubernode-type:feature OR ubernode-type:press_release) AND (other-tags:5168))',
            '_source_include': 'promo-date-time,master-image,nid,title,topics,missions,collections,other-tags,ubernode-type,primary-tag,secondary-tag,cardfeed-title,type,collection-asset-link,link-or-attachment,pr-leader-sentence,image-feature-caption,attachments,uri'
        }
        # FormRequest with method="GET" URL-encodes the dict into the query
        # string (the 'q' value contains spaces and parentheses).
        yield scrapy.FormRequest(url=self.api_url, method="GET",
                                 formdata=self.params, callback=self.parse)

    def parse(self, response, **kwargs):
        """Parse one search-API page; yield article requests and, when the
        page is full, the request for the next page."""
        data = response.json()
        try:
            newslist = data['hits']['hits']
        except (KeyError, TypeError) as e:  # unexpected API payload shape
            self.logger.debug(e)
            return None
        for one in newslist:
            url = urljoin(self.base_url, one['_source']["uri"])
            if not self.redis.sismember(self.name, url):
                yield scrapy.Request(url=url, callback=self.article_jump)
            else:
                # Hits are sorted newest-first, so the first already-seen
                # URL marks where the previous crawl stopped.
                self.logger.info("采集到上次的位置")
                return None
        if len(newslist) == self.PAGE_SIZE:
            # A full page implies more results: advance the offset by the
            # page size.  (The original stepped 'from' by 20 against a page
            # size of 24, re-fetching 4 overlapping hits on every page.)
            self.params['from'] = str(int(self.params['from']) + self.PAGE_SIZE)
            yield scrapy.FormRequest(url=self.api_url, method="GET",
                                     formdata=self.params, callback=self.parse)
        else:
            self.logger.info("爬取到最后一页")

    def article_jump(self, response):
        """Follow the ``window.forcedRoute`` redirect on the article page.

        The article URL returns a script shim whose ``window.forcedRoute``
        assignment holds the path of the actual JSON document under /api/2.
        """
        hrefs = re.findall(r'window\.forcedRoute[\s\S]+?"(\S+)"', response.text)
        for href in hrefs:
            # Absolute routes are used as-is; relative ones hang off /api/2.
            url = href if href.startswith('http') else 'https://www.nasa.gov/api/2' + href
            if not self.redis.sismember(self.name, url):
                yield scrapy.Request(url, callback=self.article_parse)

    def article_parse(self, response):
        """Build a KyqbCollectionSpiderItem from the article's JSON document."""
        try:
            data = response.json()
            if data['found']:
                url = response.urljoin(data['_source']['uri'])
                if not self.redis.sismember(self.name, url):
                    item = KyqbCollectionSpiderItem()
                    item['data_address'] = url
                    item['website_name'] = 'NASA'
                    item['spider_name'] = self.name
                    item['data_source_type'] = '科技网站'
                    item['data_type'] = 'NEWS'
                    item['collection_mode'] = 'spider'
                    item['collection_time'] = time.strftime('%Y-%m-%d %H:%M:%S')
                    # 'changed' is a Unix timestamp; fromtimestamp() yields a
                    # naive local-time datetime — TODO confirm downstream
                    # timezone expectations.
                    item['publish_time'] = datetime.fromtimestamp(int(data['_source']['changed']))
                    item['title'] = data['_source']['title']
                    item['abstract'] = None
                    item['text'] = remove_tags(data['_source']['body'])
                    item['author'] = data['_source']['name']
                    item['topic'] = None
                    image_url = data['_source']['master-image']['uri']
                    # Drupal-style 'public://' URIs map to the site's
                    # default files directory.
                    image = (image_url.replace('public://', 'https://www.nasa.gov/sites/default/files/')
                             if 'public://' in image_url else image_url)
                    item['image_urls'] = [image]
                    yield item
        except Exception as e:
            # Best-effort per-article parsing: log the failing line number
            # and skip documents with unexpected shapes.
            self.logger.debug(f"采集错误:{e.__traceback__.tb_lineno}\n{e}")
