"""
Crawl news-article data from apnews.com (topic hub listing pages and their article detail pages).
"""

import scrapy
from scrapy import Selector
from ..items import WebpageCrawlerItem

class APNewsSpider(scrapy.Spider):
    """Spider that crawls news articles from apnews.com topic hubs.

    For each configured topic hub it walks the listing cards, skips
    articles older than ``min_year``, and follows each article link to
    collect its body text into a ``WebpageCrawlerItem`` for the pipelines.
    """

    name = 'APNews'
    allowed_domains = ['apnews.com']
    # Logging is limited to errors; progress is reported via print()
    # in parse() so it stays visible despite this setting.
    custom_settings = {'LOG_LEVEL': 'ERROR'}
    # Articles published before this year are not crawled.
    min_year = 2019

    def start_requests(self):
        """Yield one request per topic-hub listing page.

        The canonical category name is passed to ``parse`` via
        ``cb_kwargs`` ('lifestyle' is recorded on items as 'life').
        """
        # Maps a crawl topic to the category name stored on the item.
        topic_mapping = {'lifestyle': 'life'}
        # Hub slugs crawled for each topic (dict order preserves the
        # original crawl order: sports, politics, entertainment, ...).
        sub_topics = {
            'sports': ['sports', 'mlb', 'nba', 'french-open', 'nhl', 'nfl'],
            'politics': ['politics'],
            'entertainment': ['entertainment', 'film-reviews', 'music-reviews'],
            'business': ['business', 'small-business', 'financial-markets',
                         'economy', 'off-the-charts', 'business-highlights'],
            'lifestyle': ['lifestyle'],
        }
        for topic, hubs in sub_topics.items():
            for sub_topic in hubs:
                url = f'https://apnews.com/hub/{sub_topic}'
                yield scrapy.Request(
                    url,
                    callback=self.parse,
                    cb_kwargs={"topic": topic_mapping.get(topic, topic)},
                )

    def parse(self, response, topic):
        """Parse a hub listing page and schedule article-detail requests.

        Args:
            response: The hub listing page response.
            topic: Canonical category name recorded on each item
                (injected through ``cb_kwargs``).
        """
        # print() rather than self.logger: LOG_LEVEL is ERROR, so an
        # info-level log line would never be shown.
        print(f'Crawling the url: [{response.url}]...')
        for card in response.css("div[data-key=feed-card-wire-story-with-image]"):
            timestamp = card.css(
                "div > div > span[data-key=timestamp]::attr(data-source)"
            ).extract_first()
            if timestamp is None:
                # Card without a timestamp attribute: skip it instead of
                # crashing on None[:10].
                continue
            news_time = timestamp[:10]  # keep only the YYYY-MM-DD prefix
            # Assumes the feed is ordered newest-first, so the first
            # too-old article ends this page — TODO confirm ordering.
            if int(news_time.split("-")[0]) < self.min_year:
                break
            href = card.css("div > a::attr(href)").extract_first()
            if href is None:
                continue  # card without a link cannot be followed
            item = WebpageCrawlerItem()
            item["time"] = news_time
            item["category"] = topic
            item["title"] = card.css("div > a > h2::text").extract_first()
            yield scrapy.Request(
                "https://apnews.com" + href,
                callback=self.get_item,
                cb_kwargs={"item": item},
            )

    def get_item(self, response, item):
        """Extract the article body and complete the item for the pipelines.

        Args:
            response: The article detail-page response.
            item: Partially filled item (injected through ``cb_kwargs``).

        Returns:
            The completed item, or ``None`` when the page yields no
            article paragraphs (nothing worth storing).
        """
        paragraphs = response.css("div.Article > p::text").extract()
        if not paragraphs:
            # Replaces the original bare except: an empty extraction is
            # the realistic failure mode, so drop the item explicitly.
            return None
        passage = ' '.join(paragraphs).strip().replace("  ", " ")
        item['source'] = "APNews"
        item['url'] = str(response.url)
        item['content'] = passage
        return item
