"""
Crawl all data in website: foxnews.com
"""

import json
import scrapy
from scrapy import Selector
from ..items import WebpageCrawlerItem

class FoxNewsSpider(scrapy.Spider):
    """Crawl news articles from foxnews.com via its article-search JSON API.

    For each topic the spider pages through the search endpoint (up to
    ``max_pages`` pages of ``max_entries`` results each), stops a topic as
    soon as it runs dry or yields an article older than ``min_year``, and
    follows every remaining article URL to extract its body text.
    """

    name = 'FoxNews'
    allowed_domains = ['foxnews.com']
    # NOTE: ERROR log level is paired with plain print() below so progress
    # messages stay visible while Scrapy's own chatter is suppressed.
    custom_settings = {'LOG_LEVEL': 'ERROR'}
    max_entries = 30   # results requested per API page (API page size)
    max_pages = 400    # upper bound of API pages fetched per topic
    min_year = 2019    # stop a topic once an article older than this appears

    def start_requests(self):
        """Yield paged API requests for every configured topic.

        A per-topic flag (set by :meth:`parse`) stops paging that topic
        early; one exhausted topic never aborts the others.
        """
        # API path segment -> category name stored on the item.
        topic_mapping = {'lifestyle': 'life'}
        topics = ['sports', 'politics', 'entertainment', 'lifestyle']
        # Per-topic stop flags, keyed by the mapped name parse() receives.
        self.finished = {}
        for topic in topics:
            mapped = topic_mapping.get(topic, topic)
            self.finished[mapped] = False
            for page in range(self.max_pages):
                if self.finished[mapped]:
                    break
                url = (
                    "https://www.foxnews.com/api/article-search"
                    "?searchBy=categories&values=fox-news%2F{}"
                    "&size={}&from={}".format(
                        topic, self.max_entries, page * self.max_entries)
                )
                yield scrapy.Request(url, callback=self.parse,
                                     cb_kwargs={"topic": mapped})

    def parse(self, response, **kwargs):
        """Parse one API result page and request each article detail page.

        Marks the topic finished when the page is empty or contains an
        article published before ``min_year``.
        """
        print(f'Getting detail pages from the url: [{response.url}]...')
        topic = kwargs['topic']
        detail_infos = json.loads(response.text)
        if not detail_infos:
            print(f"No more news in [{response.url}], end for {topic}!")
            self.finished[topic] = True
            return
        for info in detail_infos:
            # publicationDate is ISO-like: 'YYYY-MM-DD...' — year is [:4].
            if int(info["publicationDate"][:4]) < self.min_year:
                print(f"Before {self.min_year}, end for {topic}!")
                self.finished[topic] = True
                break
            if info["category"]["name"] == "VIDEO":
                continue  # video pages carry no article text; skip them
            item = WebpageCrawlerItem()
            item["title"] = info["title"]
            item["time"] = info["publicationDate"][:10]
            item["category"] = topic
            item["url"] = "https://www.foxnews.com" + info["url"]
            yield scrapy.Request(item["url"], callback=self.get_item,
                                 cb_kwargs={"item": item})

    def get_item(self, response, **kwargs):
        """Extract the article body and return the completed item.

        Returns None (dropping the page) when no paragraph text is found;
        otherwise the filled item flows on to the pipelines.
        """
        item = kwargs['item']
        # Join all text nodes under the article-body paragraphs in one pass.
        passage = "".join(
            response.css("div.article-body > p ::text").getall())
        if not passage:
            return None
        item['source'] = "FoxNews"
        item['content'] = passage
        return item
