import scrapy, time
from lxml import html
from w3lib.html import remove_tags
from kyqb_collection_spider.spiders.common import CommonSpider
from kyqb_collection_spider.items import KyqbCollectionSpiderItem


class SpacenewsCommercialSpider(CommonSpider):
    """Spider for the SPACENEWS 'commercial' section.

    Pages through the site's WordPress admin-ajax posts endpoint and yields
    one ``KyqbCollectionSpiderItem`` per previously-unseen article. URLs
    already recorded in Redis (set keyed by the spider name — provided by
    ``CommonSpider``, presumably; confirm against that base class) are
    treated as duplicates; after 5 consecutive duplicates the crawl stops.
    """

    name = 'spacenews_commercial'
    start_urls = ['https://spacenews.com/section/commercial/']
    custom_settings = {
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }

    def __init__(self, *args, **kwargs):
        # Initialize the base spider first so our state is set on a fully
        # constructed instance.
        super(SpacenewsCommercialSpider, self).__init__(*args, **kwargs)
        self.repeat = 0          # consecutive already-seen URLs; crawl stops after 5
        self.page_num = 1        # next page number to request from the ajax API
        self.page_max_num = 500  # hard cap on pages fetched in one run
        # Query parameters for the WordPress admin-ajax "get_posts" endpoint.
        self.post_param = {
            'action': 'get_posts',
            'paged': {},         # overwritten with the page number before each request
            'taxonomy': 'section',
            'section': 'commercial',
            'term': 'commercial',
            'order': 'DESC',
            'lazy_load_term_meta': 'true',
            'posts_per_page': '15',
            'templatePart': 'posts-taxonomy'
        }
        self.base_url = 'https://spacenews.com/section/commercial/'
        self.url_api = 'https://spacenews.com/wp-admin/admin-ajax.php'

    def _next_api_request(self):
        """Build the request for the current page and advance the counter.

        Returns:
            scrapy.Request for the ajax posts API, routed to ``xhr_parse``.
        """
        self.post_param['paged'] = str(self.page_num)
        self.page_num += 1
        # Values are plain ASCII tokens, so a simple join is safe here.
        query = '&'.join(f"{k}={v}" for k, v in self.post_param.items())
        return scrapy.Request(url=f"{self.url_api}?{query}", callback=self.xhr_parse)

    def parse(self, response, **kwargs):
        """Entry point: kick off pagination against the ajax API."""
        yield self._next_api_request()

    def xhr_parse(self, response):
        """Handle one page of the posts API: schedule articles, then paginate.

        Stops when the API reports no next page, when 5 consecutive
        duplicate URLs are seen, or when ``page_max_num`` is reached.
        """
        data = response.json()
        if not data['next']:
            self.logger.info(f'{self.name}:数据已采集完成')
            return
        for post in data['posts']:
            url = post['permalink']
            if not self.redis.sismember(self.name, url):
                yield scrapy.Request(url, callback=self.article)
            elif self.repeat < 5:
                self.repeat += 1
            else:
                # Too many consecutive known URLs — we have caught up with
                # previously collected data; end the crawl.
                self.logger.info(f"数据采集到完成:{url}")
                return
        if self.page_num < self.page_max_num:
            yield self._next_api_request()
        else:
            self.logger.info(f"已完成最大爬取页数:{self.page_num}页")

    def article(self, response):
        """Parse a single article page into a KyqbCollectionSpiderItem."""
        # Drop every <script> element so script text cannot leak into the
        # extracted body text.
        doc = html.fromstring(response.text)
        for script in doc.findall('.//script'):
            script.drop_tree()
        # BUG FIX: tostring() returns bytes by default, but scrapy.Selector
        # (parsel) requires str — serialize as unicode.
        resp = scrapy.Selector(text=html.tostring(doc, encoding='unicode'))
        item = KyqbCollectionSpiderItem()
        item['data_address'] = response.url
        item['website_name'] = 'SPACENEWS'
        item['spider_name'] = self.name
        item['data_source_type'] = '新闻媒体'
        item['data_type'] = 'COMMERCIAL'
        item['collection_mode'] = 'spider'
        item['collection_time'] = time.strftime('%Y-%m-%d %H:%M:%S')
        timestamp = resp.xpath('//time[@datetime]/@datetime').extract_first()
        # BUG FIX: datetime attributes are typically full ISO-8601
        # (e.g. "2023-05-01T12:00:00-04:00"); strptime('%Y-%m-%d') on the
        # whole string raises ValueError, so parse only the leading date.
        # Also tolerate a missing <time> element instead of crashing.
        if timestamp:
            item['publish_time'] = time.strftime(
                '%Y-%m-%d %H:%M:%S', time.strptime(timestamp[:10], '%Y-%m-%d'))
        else:
            item['publish_time'] = None
        item['title'] = resp.xpath('//*[@class="post-title"]/text()').extract_first()
        item['abstract'] = resp.xpath('//*[@class="synopsis"]/text()').extract_first()
        postTags = [i.strip() for i in resp.xpath('//*[@class="tax-container"]/a/text()').extract()]
        if postTags:
            item['topic'] = '!@@!'.join(postTags)
        else:
            item['topic'] = None
        container = resp.xpath('//div[@class="tablet-wrapper"]').extract_first()
        # BUG FIX: remove_tags(None) raises when the wrapper div is absent.
        item['text'] = remove_tags(container).strip() if container else None
        item['author'] = resp.xpath('//*[@rel="author"]/text()').extract_first()
        item['image_urls'] = resp.xpath('//figure//img/@src').extract()
        yield item
