"""
Crawl all data in website: chinadaily.com.cn
"""

from urllib import response
import scrapy
import time
import json
from ..items import WebpageCrawlerItem

class ChinaDailySpider(scrapy.Spider):
    """Spider that crawls news articles from chinadaily.com.cn.

    It walks the paginated listing pages of a fixed set of topics and
    sub-topics, follows each article link, and yields one
    ``WebpageCrawlerItem`` per article published in ``min_year`` or later.
    """

    name = 'ChinaDaily'
    allowed_domains = ['chinadaily.com.cn']
    custom_settings = {'LOG_LEVEL': 'ERROR'}

    # NOTE: class-level mutable state is shared across instances; Scrapy
    # normally runs a single instance per spider name, so that is acceptable here.
    max_pages = 400  # highest listing page number tried per sub-topic (inclusive)
    min_year = 2019  # articles published before this year stop the sub-topic's crawl
    urls = []        # NOTE(review): unused in this file; kept for backward compatibility
    count = 0        # number of items successfully scraped
    is_over = {}     # (topic, sub_topic) -> True once that listing is exhausted

    def start_requests(self):
        """Yield one request per listing page for every topic/sub-topic pair.

        Scrapy pulls requests from this generator lazily (as scheduler
        capacity allows), so a stop flag set by :meth:`parse` can cut the
        pagination of its own sub-topic short.  The flag is keyed by
        ``(topic, sub_topic)`` so one sub-topic finishing early cannot
        truncate the pagination of another.
        """
        topics = ['sports', 'china', 'business', 'life', 'culture', 'travel', 'opinion']
        sub_topics = {
            'sports': ['soccer', 'basketball', 'volleyball', 'tennis', 'golf', 'swimming'],
            'china': ['governmentandpolicy', 'society', 'scitech', '59b8d010a3108c54ed7dfc30',
                      'coverstory', 'environment', '59b8d010a3108c54ed7dfc27', '59b8d010a3108c54ed7dfc25'],
            'business': ['economy', 'companies', 'biz_industries', 'tech', 'motoring', 'money'],
            'life': ['fashion', 'celebrity', 'people', 'food', 'health', 'video', 'photo'],
            'culture': ['art', 'musicandtheater', 'filmandtv', 'books', 'heritage', 'eventandfestival', 'culturalexchange'],
            'travel': ['news', 'citytours', 'guidesandtips', 'footprint',
                       'aroundworld', '59b8d013a3108c54ed7dfca3', 'photo', 'video'],
            'opinion': ['editionals', 'op-ed', 'commentator', 'opinionline'],
        }
        for topic in topics:
            for sub_topic in sub_topics[topic]:
                stop_key = (topic, sub_topic)
                self.is_over[stop_key] = False
                # Pages are numbered from 1; include page max_pages itself
                # (the original range(1, max_pages) dropped the last page).
                for page in range(1, self.max_pages + 1):
                    if self.is_over[stop_key]:
                        break
                    url = f'https://www.chinadaily.com.cn/{topic}/{sub_topic}/page_{page}.html'
                    yield scrapy.Request(
                        url,
                        callback=self.parse,
                        cb_kwargs={"topic": topic, "sub_topic": sub_topic},
                    )

    def parse(self, response, **kwargs):
        """Parse one listing page and follow every article link on it.

        Sets the sub-topic's stop flag when the page has no article entries
        or when an article older than ``min_year`` is found (listings appear
        to be ordered newest-first, so later entries are older -- TODO confirm).
        """
        topic = kwargs["topic"]
        stop_key = (topic, kwargs.get("sub_topic"))
        parent_nodes = response.xpath('//span[@class="tw3_01_2_t"]')
        if not parent_nodes:
            print(f'No detail pages in url: [{response.url}], crawling of topic {topic} stopped.')
            self.is_over[stop_key] = True
            return

        print(f'Crawling the url: [{response.url}]')
        detail_urls = []
        for node in parent_nodes:
            url = node.xpath('h4/a/@href').get()
            raw_time = node.xpath('b/text()').get()
            # Skip malformed listing entries instead of crashing on None
            # (the original called .split() on a possibly-None value).
            if url is None or raw_time is None:
                continue
            # The <b> text looks like "YYYY-MM-DD HH:MM"; keep the date part.
            detail_urls.append((f'https:{url}', raw_time.split(" ")[0]))

        for url, news_time in detail_urls:
            if int(news_time.split("-")[0]) < self.min_year:
                print(f'Before {self.min_year}, crawling of topic {topic} stopped.')
                self.is_over[stop_key] = True
                break
            # Pre-fill the fields known from the listing page; get_item adds the rest.
            item = WebpageCrawlerItem()
            item["time"] = news_time
            item["category"] = topic
            yield scrapy.Request(url, callback=self.get_item, cb_kwargs={"item": item})

    def get_item(self, response, **kwargs):
        """Extract title and body text from an article page.

        Completes the item pre-filled by :meth:`parse` and returns it so the
        pipelines can persist it; returns None when the page lacks a title
        or body text.
        """
        item = kwargs['item']
        page_title = response.xpath('//h1/text()').get()  # a single <h1> title per article
        # Body text is split across many <p> paragraphs.
        paragraphs = response.xpath('//div[@id="Content"]/p/text()').getall()

        if page_title is None or not paragraphs:
            print(f'No target contents in url: [{response.url}]')
            return None

        item['source'] = "ChinaDaily"
        item['url'] = str(response.url)
        item['title'] = page_title.strip()
        item['content'] = '\n'.join(paragraphs).strip()
        self.count += 1
        return item