# -*- coding: utf-8 -*-
import scrapy
from scrapy_splash import SplashRequest
from ..items import EnglishItem
import datetime
import re
# Date pattern: year, month, day separated by any non-digit (e.g. "2020-01-02",
# "2020/1/2"), optionally capturing one trailing non-digit character as well.
# Pre-compiled because it is applied to every article page the spider parses;
# re.search() accepts a compiled pattern, so existing callers keep working.
pattern = re.compile(r'(\d{4}[\D]\d{1,2}[\D]\d{1,2}[\D]?)')



class ChinadailySpider(scrapy.Spider):
    """Crawl chinadaily.com.cn section pages via Splash-rendered requests.

    Each section front page is fetched through a Splash instance (endpoint
    ``execute``) so JavaScript-built listings are rendered; ``parse`` both
    follows navigation / teaser / pagination links and, on article pages,
    scrapes the article fields into an :class:`EnglishItem`.
    """

    name = 'chinadaily'
    allowed_domains = ['www.chinadaily.com.cn']
    start_urls = ['http://www.chinadaily.com.cn/china',
                  'http://www.chinadaily.com.cn/world',
                  'http://www.chinadaily.com.cn/business',
                  'http://www.chinadaily.com.cn/life',
                  'http://www.chinadaily.com.cn/culture',
                  'http://www.chinadaily.com.cn/travel',
                  'http://www.chinadaily.com.cn/sports',
                  'http://www.chinadaily.com.cn/opinion',
                 ]

    # Lua script executed by Splash's 'execute' endpoint: open the URL,
    # wait for the page to render, then return the final HTML.
    script = """
         function main(splash, args)
             splash.resource_timeout = args.resource_timeout
             splash.images_enabled = args.images_enabled
             assert(splash:go(args.url))
             assert(splash:wait(args.wait))
             return splash:html()
         end
         """

    # Arguments forwarded to the Lua script above.
    args = {'wait': 3,                   # seconds to let the page render
            "images_enabled": False,     # skip images to speed rendering up
            "resource_timeout": 10,      # per-resource timeout, seconds
            "lua_source": script,
           }

    def start_requests(self):
        """Issue one Splash-rendered request per section start URL."""
        for url in self.start_urls:
            yield SplashRequest(
                url, self.parse, endpoint='execute', args=ChinadailySpider.args,
                splash_headers={"referer": ChinadailySpider.allowed_domains[0],
                                "User-Agent": "Chromium"})

    def parse(self, response):
        """Follow site links; on article pages, yield a populated item."""
        # Follow top navigation, article teasers, and pagination links.
        for url in response.css(".topNav2_art a::attr(href)").extract():
            yield scrapy.Request(response.urljoin(url))
        for new in response.css("span.tw3_01_2_t a::attr(href)").extract():
            yield scrapy.Request(response.urljoin(new))
        for nextpage in response.css('div#div_currpage a::attr(href)').extract():
            yield scrapy.Request(response.urljoin(nextpage))

        # Listing/navigation pages have no <div class="lft_art"><h1>; bail
        # out instead of raising IndexError on extract()[0].
        titles = response.css("div.lft_art h1::text").extract()
        if not titles:
            return
        title = titles[0].strip()

        # Info line, e.g. "xinhua | Updated: 2020-01-02"; extracted once and
        # reused for both the author and the publish date.
        info_parts = response.css("div.info span.info_l::text").extract()
        info = info_parts[0].strip() if info_parts else ""
        match = re.search(pattern, info)
        publish_time = match.group() if match else None

        content = " ".join(response.css('div#Content p::text').extract())

        # Breadcrumb trail; the first entry is the site root, hence [1:] below.
        # BUG FIX: map() is a lazy iterator in Python 3 and does not support
        # slicing ("fields[1:]" raised TypeError) -- build a real list instead.
        fields = response.css('div.topNav3_art span#bread-nav a::text').extract()
        fields = [f.replace(" ", "") for f in fields]

        item = EnglishItem()
        item['crawl_time'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        item['source'] = ChinadailySpider.name
        item['author'] = info.split('|')[0]
        item['title'] = title
        item['content'] = content
        item['readers'] = 666  # placeholder: the site exposes no read counter
        item['publish_time'] = publish_time
        item['field'] = "".join(fields[1:])
        item['url'] = response.url
        yield item


