# -*- coding: utf-8 -*-
import scrapy
from scrapy_splash import SplashRequest
from ..items import EnglishItem
import datetime
import re
# Loose date matcher: a 4-digit year, then 1-2 digit month and day,
# each separated by any non-digit character (e.g. "2020-1-2", "2020/01/02").
pattern = r'(\d{4}[\D]\d{1,2}[\D]\d{1,2}[\D]?)'



class CnnSpider(scrapy.Spider):
    """Crawl CNN edition section pages through Splash-rendered requests and
    yield one ``EnglishItem`` per article found.

    Section pages are JS-heavy, so every start URL goes through the Splash
    ``execute`` endpoint with the Lua ``script`` below; article pages linked
    from a section are fetched with plain ``scrapy.Request``.
    """

    name = 'cnn'
    allowed_domains = ['edition.cnn.com']
    start_urls = [
                  #'http://edition.cnn.com/',
                  'https://edition.cnn.com/world',
                  'https://edition.cnn.com/africa',
                  'https://edition.cnn.com/americas',
                  'https://edition.cnn.com/asia',
                  'https://edition.cnn.com/australia',
                  'https://edition.cnn.com/china',
                  'https://edition.cnn.com/europe',
                  'https://edition.cnn.com/india',
                  'https://edition.cnn.com/middle-east',
                  'https://edition.cnn.com/uk',
                 ]

    # Lua script run by the Splash 'execute' endpoint: load the page, wait
    # for JS rendering to finish, then return the final HTML.
    script = """
         function main(splash, args)
             splash.resource_timeout = args.resource_timeout
             splash.images_enabled = args.images_enabled
             assert(splash:go(args.url))
             assert(splash:wait(args.wait))
             return splash:html()
         end
         """

    args = {'wait': 3,                   # seconds Splash waits after page load
            "images_enabled": False,     # skip images to speed up rendering
            "resource_timeout": 10,      # per-resource timeout, seconds
            "lua_source": script,
           }

    def start_requests(self):
        """Issue a Splash-rendered request for every section start URL."""
        splash_headers = {
            "referer": CnnSpider.allowed_domains[0],
            "User-Agent": ("Mozilla/5.0 (X11; Ubuntu; Linux x86_64; "
                           "rv:71.0) Gecko/20100101 Firefox/71.0"),
        }
        for url in self.start_urls:
            yield SplashRequest(url, self.parse, endpoint='execute',
                                args=CnnSpider.args,
                                splash_headers=splash_headers)

    def parse(self, response):
        """Follow article links on a section page, then extract the article.

        Yields follow-up ``scrapy.Request`` objects for every headline link,
        and one populated ``EnglishItem`` when the current response is itself
        an article page with a recognizable headline.
        """
        for url in response.css("div.media a::attr(href)").extract():
            yield scrapy.Request(response.urljoin(url))
        for url in response.css("h3.cd__headline a::attr(href)").extract():
            yield scrapy.Request(response.urljoin(url))

        # Video pages use a different layout; skip them entirely.
        if response.url.find("video") > 0:
            return

        # Try the two known headline selectors in order.
        title = response.css('h1.pg-headline::text').extract()
        if not title:
            title = response.css('h1.Article__title::text').extract()
        if not title:
            # Unknown page layout — log (Py3-safe, replaces the old
            # Python 2 `print` statement) and give up on this page.
            self.logger.info("no headline found on %s", response.url)
            return
        title = title[0]

        # Publish date, e.g. "Updated 1234 GMT (2034 HKT) December 4, 2019".
        # Guard both the missing-selector and no-regex-match cases, which
        # previously raised IndexError / AttributeError.
        publish_time = ""
        update_texts = response.css('p.update-time::text').extract()
        if update_texts:
            match = re.search(r" [JFMASND].+\d,.?\d+", update_texts[0])
            if match:
                publish_time = datetime.datetime.strptime(
                    match.group().strip(), "%B %d, %Y").strftime('%Y-%m-%d')

        content = "".join(
            response.css('div.zn-body__paragraph *::text').extract())

        item = EnglishItem()
        now = datetime.datetime.now()
        item['crawl_time'] = now.strftime("%Y-%m-%d %H:%M:%S")
        item['source'] = CnnSpider.name
        item['author'] = "".join(
            response.css("span.metadata__byline__author *::text").extract())
        item['title'] = title
        item['content'] = content
        item['readers'] = 666  # placeholder read count; site exposes none
        item['publish_time'] = publish_time
        # Section slug from the URL path; guard short URLs (was a potential
        # IndexError).
        url_parts = response.url.split('/')
        item['field'] = url_parts[6] if len(url_parts) > 6 else ""
        item['url'] = response.url
        yield item


