"""
Crawl all data in website: globaltimes.cn
"""

import scrapy
from scrapy import Selector
from ..items import WebpageCrawlerItem

class GlobalTimesSpider(scrapy.Spider):
    """Spider that crawls news articles from globaltimes.cn.

    Walks the topic / sub-topic listing pages, collects links to articles
    published in ``min_year`` or later, and yields one
    ``WebpageCrawlerItem`` per article for the pipelines to store.
    """

    name = 'GlobalTimes'
    allowed_domains = ['globaltimes.cn']
    custom_settings = {'LOG_LEVEL': 'ERROR'}
    # Articles dated before this year are not crawled.
    min_year = 2019

    def start_requests(self):
        """Yield one listing-page request per (topic, sub-topic) pair."""
        # The site path segment is 'sport' but the stored category is 'sports'.
        topic_mapping = {"sport": "sports"}
        topics = ['sport', 'china', 'life', 'opinion']
        sub_topics = {
            'sport': [''],
            'china': ['politics', 'society', 'diplomacy', 'military', 'science', 'odd'],
            'life': ['culture', 'entertainment', 'travel', 'learning-chinese'],
            'opinion': ['editorial', 'observer', 'Global-Minds', 'asian-review',
                        'top-talk', 'viewpoint', 'columnists'],
        }
        for topic in topics:
            for sub_topic in sub_topics[topic]:
                url = f'https://www.globaltimes.cn/{topic}/{sub_topic}'
                if sub_topic != '':
                    url += '/'
                yield scrapy.Request(
                    url,
                    callback=self.parse,
                    cb_kwargs={"topic": topic_mapping.get(topic, topic)},
                )

    def parse(self, response, **kwargs):
        """Parse a listing page and request each article from min_year on.

        NOTE(review): stops at the first entry older than ``min_year``,
        which assumes the listing is sorted newest-first — confirm against
        the live site.
        """
        print(f'Crawling the url: [{response.url}]...')
        topic = kwargs['topic']
        for info_item in response.css("div.list_info"):
            raw_time = info_item.css("div.source_time::text").get()
            if raw_time is None:
                # Malformed listing entry — skip it instead of crashing
                # on None (the old extract_first().split(...) chain did).
                continue
            # e.g. "Source | 2021/3/5 12:00" -> "2021-3-5"
            news_time = raw_time.split("|")[-1].strip().split(" ")[0].replace("/", "-")
            year, month, day = news_time.split("-")
            if int(year) < self.min_year:
                break
            # Zero-pad month/day to the canonical YYYY-MM-DD form.
            news_time = f"{year}-{month.zfill(2)}-{day.zfill(2)}"
            detail_url = info_item.css("a::attr(href)").get()
            if not detail_url:
                continue
            item = WebpageCrawlerItem()
            item["time"] = news_time
            item["category"] = topic
            # urljoin is a no-op for absolute hrefs and repairs relative ones.
            yield scrapy.Request(
                response.urljoin(detail_url),
                callback=self.get_item,
                cb_kwargs={"item": item},
            )

    def get_item(self, response, **kwargs):
        """Extract title and body from an article page and finish the item.

        Returns the populated item for the pipelines, or ``None`` when the
        page lacks a title or body (the item is then dropped).
        """
        item = kwargs['item']
        # .get() returns None when the selector matches nothing; treat a
        # missing title as an empty page rather than catching a bare except.
        title = response.css("div.article_title::text").get()
        title = title.strip() if title is not None else ""
        passage = ''.join(response.css("div.article_right::text").getall()).strip()
        if passage == "" or title == "":
            return None
        item['source'] = "GlobalTimes"
        item['url'] = str(response.url)
        item['title'] = title
        item['content'] = passage
        return item
