import scrapy, time
from w3lib.html import remove_tags
from kyqb_collection_spider.spiders.common import CommonSpider
from kyqb_collection_spider.items import KyqbCollectionSpiderItem


class NasaNewsSpider(CommonSpider):
    """Collect news articles from JPL NASA via the site's paginated XHR JSON feed.

    Flow: ``parse`` receives the landing page, builds the XHR headers, and
    issues the first JSON page request; ``xhr_parse`` yields one item per
    unseen article and follows pagination until the feed reports no more
    pages or the position of the previous crawl is reached (deduplication
    is tracked through a Redis set named after the spider).
    """
    name = 'jpl_nasa_news'
    start_urls = ['https://www.jpl.nasa.gov/news/']
    custom_settings = {
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }

    def __init__(self, *args, **kwargs):
        # Consecutive already-seen articles; once this reaches 5 we assume
        # we have caught up with the previous crawl and stop.
        self.repeat = 0
        # Next page index to request from the XHR endpoint.
        self.page_num = 0
        self.xhr_url = 'https://www.jpl.nasa.gov/assets/json/getMore.php?news=true&&page={page_num}'
        self.base_url = 'https://www.jpl.nasa.gov/news/'
        super().__init__(*args, **kwargs)

    def parse(self, response, **kwargs):
        """Bootstrap callback: build XHR headers and request the first JSON page.

        The headers are stored on ``self.headers`` so ``xhr_parse`` can reuse
        them for subsequent pagination requests.
        """
        url = self.xhr_url.format(page_num=self.page_num)
        self.page_num += 1
        # Minimal headers the endpoint expects for an AJAX-style request.
        self.headers = {
            'x-requested-with': 'XMLHttpRequest',
            'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'referer': self.base_url,
        }
        yield scrapy.Request(url=url, headers=self.headers, callback=self.xhr_parse)

    def xhr_parse(self, response):
        """Parse one JSON feed page: yield an item per unseen article, then
        follow pagination while the feed reports ``more``.

        Stops early (without requesting further pages) after 5 consecutive
        already-collected articles, i.e. when the previous crawl's position
        is reached.
        """
        data = response.json()
        # Guard: a malformed response without 'items' must not crash the page.
        for article in data.get('items') or []:
            data_address = response.urljoin(article.get("link"))
            if self.redis.sismember(self.name, data_address):
                # Already collected in a previous run.
                if self.repeat < 5:
                    self.repeat += 1
                    continue
                self.logger.info(f"爬取到上一次位置:{data_address}")
                return None
            item = KyqbCollectionSpiderItem()
            urls = set()  # de-duplicated image URLs gathered below
            item['data_address'] = data_address
            item['website_name'] = 'JPL_NASA'
            item['spider_name'] = self.name
            item['data_source_type'] = '科技网站'
            item['data_type'] = 'NEWS'
            item['collection_mode'] = 'spider'
            item['collection_time'] = time.strftime('%Y-%m-%d %H:%M:%S')
            timestamp = article.get("date")
            if timestamp:
                # Feed dates look like "January 2, 2021"; normalize to
                # the project-wide 'YYYY-mm-dd HH:MM:SS' format.
                timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(timestamp, '%B %d, %Y'))
            item['publish_time'] = timestamp
            item['title'] = article.get("title")
            item['abstract'] = article.get("tease")
            item['topic'] = None
            text = article.get("body")
            if text:
                # Harvest inline <img> sources before stripping the markup.
                body_sel = scrapy.Selector(text=text)
                urls.update(body_sel.xpath("//img/@src").extract())
                text = remove_tags(text)
            item['text'] = text
            item['author'] = article.get("author")
            images = article.get("images")
            if images:
                # Captions may embed links to full-size images.
                for image in images.values():
                    caption = image.get("caption")
                    if not caption:
                        continue
                    caption_sel = scrapy.Selector(text=caption)
                    for anchor in caption_sel.xpath("//a"):
                        raw_href = anchor.xpath('./@href').extract_first()
                        if not raw_href:
                            # <a> without href: previously crashed with
                            # AttributeError on None.replace(...).
                            continue
                        href = response.urljoin(raw_href.replace('..', ''))
                        if ".jpg" in href or ".png" in href:
                            urls.add(href)
            item['image_urls'] = list(urls)
            yield item
        # Guard: missing 'more' key is treated as "no further pages" instead
        # of raising KeyError.
        if data.get('more'):
            url = self.xhr_url.format(page_num=self.page_num)
            self.page_num += 1
            yield scrapy.Request(url, headers=self.headers, callback=self.xhr_parse)
        else:
            self.logger.info(f'{self.name}已爬取完毕')
