import scrapy, re, time
from lxml import html
from w3lib.html import remove_tags
from kyqb_collection_spider.spiders.common import CommonSpider
from kyqb_collection_spider.items import KyqbCollectionSpiderItem

class BreakingdefenseAlliesSpider(CommonSpider):
    """Spider for the 'Allies' category of breakingdefense.com.

    Flow:
      1. ``parse`` loads the category landing page and extracts the
         infinite-scroll AJAX nonce embedded in an inline script.
      2. ``xhr_parse`` pages through the WordPress ``admin-ajax.php``
         endpoint and follows every article URL not already recorded in
         the Redis set keyed by the spider name (incremental crawling).
      3. ``article`` extracts one article page into a
         ``KyqbCollectionSpiderItem``.
    """

    name = 'breakingdefense_allies'
    allowed_domains = ['breakingdefense.com']
    start_urls = ['http://breakingdefense.com/category/allies/']
    custom_settings = {
        'ITEM_PIPELINES': {'kyqb_collection_spider.pipelines.NewImagesPipeline': 5,
                           'kyqb_collection_spider.pipelines.KyqbCollectionSpiderPipeline': 600},
    }

    def __init__(self, *args, **kwargs):
        self.repeat = 0          # consecutive already-crawled URLs seen so far
        self.page_num = 0        # next AJAX page index to request
        self.page_max_num = 500  # hard cap on pagination depth
        self.security = None     # AJAX nonce, filled in by parse()
        # Template of the admin-ajax.php POST body.  Always work on a
        # copy (see _next_page_request) so concurrently scheduled
        # requests never share mutable 'page'/'security' state.
        self.post_param = {
            'action': 'bm_ajax_load_more',
            'page': {},
            'query': {
                "category_name": "allies",
                "tag": "",
                "tag_id": "",
                "paged": '0',
                "post__not_in": [],
                "tag__in": [],
                "post_type": ""
            },
            'security': {}
        }
        self.base_url = 'https://breakingdefense.com/category/allies/'
        self.url_api = 'https://breakingdefense.com/wp-admin/admin-ajax.php'
        super(BreakingdefenseAlliesSpider, self).__init__(*args, **kwargs)

    def _next_page_request(self):
        """Build the FormRequest for the current AJAX page and advance
        ``self.page_num``.  Requires ``parse`` to have set ``self.security``
        and ``self.headers`` first."""
        post_param = dict(self.post_param)  # per-request copy of the template
        post_param['page'] = str(self.page_num)
        post_param['security'] = self.security
        self.page_num += 1
        return scrapy.FormRequest(url=self.url_api, formdata=post_param,
                                  headers=self.headers,
                                  callback=self.xhr_parse,
                                  meta={"headers": self.headers})

    def parse(self, response):
        """Extract the infinite-scroll nonce from the landing page and
        kick off the first AJAX pagination request."""
        security = re.findall(r'bmInfiniteScroll[\S\s]+"nonce":"(\w+)"};', response.text)
        if not security:
            return
        self.security = security[0]
        headers = response.headers.to_unicode_dict()
        self.headers = {
            # Reuse the session cookie handed out with the landing page;
            # default to '' so a missing Set-Cookie header cannot raise.
            'cookie': headers.get('set-cookie', ''),
            'x-requested-with': 'XMLHttpRequest',
            'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'referer': self.base_url,
            'origin': 'https://breakingdefense.com',
            'sec-fetch-mode': 'cors',
        }
        yield self._next_page_request()

    def xhr_parse(self, response):
        """Parse one AJAX batch of post teasers; schedule article requests
        for unseen URLs and request the next page until the cap or the
        previous crawl's position is reached."""
        data = response.json()
        if not (data['success'] and data['data']):
            return
        doc = html.fromstring(data['data'])
        # encoding='unicode' makes tostring() return str, which is what
        # scrapy.Selector(text=...) expects (the default returns bytes).
        body = scrapy.Selector(text=html.tostring(doc, encoding='unicode'))
        for post in body.css('.postTitle'):
            href = post.xpath('.//a/@href').extract_first()
            if not href or '/tag/' in href:
                continue
            if not self.redis.sismember(self.name, href):
                yield scrapy.Request(url=href, headers=self.headers, callback=self.article)
            elif self.repeat < 5:
                self.repeat += 1
            else:
                # Several consecutive known URLs: we have caught up with
                # the previous crawl, so stop paginating entirely.
                self.logger.info(f'已爬取到上次位置:{href}')
                return
        if self.page_num < self.page_max_num:
            yield self._next_page_request()

    def article(self, response):
        """Extract a single article page into a KyqbCollectionSpiderItem."""
        doc = html.fromstring(response.text)
        # Drop <script> elements so remove_tags() below cannot leak
        # embedded JavaScript into the extracted article text.
        for script in doc.findall('.//script'):
            script.drop_tree()
        body = scrapy.Selector(text=html.tostring(doc, encoding='unicode'))
        item = KyqbCollectionSpiderItem()
        item['data_address'] = response.url
        item['website_name'] = 'BREAKINGDEFENSE'
        item['spider_name'] = self.name
        item['data_source_type'] = '新闻媒体'
        item['data_type'] = 'ALLIES'
        item['collection_mode'] = 'spider'
        item['collection_time'] = time.strftime('%Y-%m-%d %H:%M:%S')
        timestamp = body.xpath('//*[@class="timestamp"]/text()').extract_first().replace(' on ', '')
        # %I (12-hour clock) is required for %p (AM/PM) to take effect:
        # with %H, strptime silently ignores %p and parses PM times as AM.
        item['publish_time'] = time.strftime(
            '%Y-%m-%d %H:%M:%S', time.strptime(timestamp, '%B %d, %Y at %I:%M %p'))
        item['title'] = body.xpath('//*[@class="postTitle"]/a/text()').extract_first()
        item['abstract'] = body.xpath('//*[@class="postExcerpt"]/text()').extract_first()
        post_tags = body.css('.postTags').extract_first()
        if post_tags:
            tag_list = [t.strip() for t in remove_tags(post_tags).replace('Topics:', '').split(',')]
            item['topic'] = '!@@!'.join(tag_list)
        else:
            item['topic'] = None
        container = body.css('.entry').extract_first()
        item['text'] = remove_tags(container).strip()
        item['author'] = body.xpath('//*[@class="postAuthor"]//a[@rel="author"]/text()').extract_first()
        item['image_urls'] = body.xpath(
            '//*[@id="container"]//div[@id="mainContent"]//div[@class="entry"]//img/@src').extract()
        yield item