"""
Crawl all data in website: usatoday.com
"""

import re
import scrapy
from scrapy import Selector
from ..items import WebpageCrawlerItem

class USATodaySpider(scrapy.Spider):
    """Crawl article pages from usatoday.com.

    Starts from a set of topic/sub-topic listing pages, follows article
    links whose URL date is ``min_year`` or newer, and emits one
    ``WebpageCrawlerItem`` per article via :meth:`get_item`.
    """

    name = 'USAToday'
    allowed_domains = ['usatoday.com']
    custom_settings = {'LOG_LEVEL': 'ERROR'}
    # Articles dated before this year are skipped.
    min_year = 2019
    # Site section name -> category name stored on the item.  URLs still
    # contain the site section name (e.g. '/money/'), so URL filtering in
    # parse() must use the raw section, and this mapping is applied only
    # when populating item["category"].
    topic_mapping = {'money': 'business'}

    def start_requests(self):
        """Yield one listing-page request per (topic, sub-topic) pair."""
        # topics = ['sports', 'entertainment', 'money', 'life', 'travel', 'opinion']
        topics = ['travel', 'opinion']
        sub_topics = {
            'sports': ['nfl', 'nba', 'mlb', 'nhl', 'ncaaf', 'tennis', 'indycar',
                       'outdoors', 'highschool', 'nascar', 'soccer', 'wnba',
                       'ncaab', 'ncaaw', 'odds', 'sportspulse', 'weeklypulse',
                       'boxing', 'olympics', 'sports-seriously'],
            'entertainment': ['movies', 'celebrities', 'tv', 'music', 'books',
                              'entertainthis', 'celebrity-deaths',
                              'american-influencer-awards'],
            'money': ['personal-finance', 'cars', 'retirement', 'investing',
                      'careers', 'small-business'],
            'life': ['health-wellness', 'food-dining', 'humankind',
                     'womenofthecentury', 'problemsolved', 'holidays'],
            'travel': ['destinations', 'airline-news', 'experience-america',
                       'cruises'],
            'opinion': ['race-in-america', 'todaysdebate', 'columnist',
                        'voices', 'suzette-hackney', 'policing'],
        }
        for topic in topics:
            for sub_topic in sub_topics[topic]:
                url = f'https://www.usatoday.com/{topic}/{sub_topic}/'
                # Pass the *raw* site topic so that the URL substring
                # filter in parse() matches; the category mapping is
                # applied there.  (Previously the mapped name was passed,
                # which would have dropped every 'money' article because
                # those URLs contain '/money/', not '/business/'.)
                yield scrapy.Request(url, callback=self.parse,
                                     cb_kwargs={"topic": topic})

    def parse(self, response, **kwargs):
        """Extract dated article links from a listing page.

        Yields a request per article, carrying a partially-filled item
        (time, category, title) to :meth:`get_item`.
        """
        topic = kwargs['topic']
        self.logger.info('Crawling the url: [%s]...', response.url)
        for info_item in response.css("div.gnt_m.gnt_m_flm > a"):
            detail_url = info_item.css("::attr(href)").extract_first()
            if detail_url is None or topic not in detail_url:
                continue
            # Article URLs embed the publication date as yyyy/mm/dd.
            date_match = re.search(r'(\d{4})/(\d{2})/(\d{2})', detail_url)
            if date_match is None:
                continue
            if int(date_match.group(1)) < self.min_year:
                # Listing pages are ordered newest-first, so every link
                # after this one is older as well — stop scanning.
                break
            title = info_item.css("::text").extract_first()
            if title is None:
                continue
            item = WebpageCrawlerItem()
            item["time"] = '-'.join(date_match.groups())
            item["category"] = self.topic_mapping.get(topic, topic)
            item["title"] = title.strip()
            # urljoin handles both relative and absolute hrefs, unlike
            # unconditionally prepending the host.
            yield scrapy.Request(response.urljoin(detail_url),
                                 callback=self.get_item,
                                 cb_kwargs={"item": item})

    def get_item(self, response, **kwargs):
        """Fill the article body into the item and hand it to pipelines.

        Returns the completed item, or ``None`` when the page has no
        extractable body text (the article is then dropped).
        """
        item = kwargs['item']
        paragraphs = response.css("article > div.gnt_ar_b > p::text").extract()
        passage = ''.join(paragraphs).strip()
        if not passage:
            return None
        item['source'] = "USAToday"
        item['url'] = str(response.url)
        item['content'] = passage
        return item
