# coding=UTF-8
# author=suemi
# created at 16/5/15
import datetime

import re

import scrapy

from crawler.items import ArticleItem
from crawler.spiders.ArticleSpider import ArticleSpider


class ChinaNewsSpider(ArticleSpider):
    """Spider for www.chinanews.com scroll-news pages.

    Starts at the scroll page for a given topic and date, walks backwards
    one day at a time via :meth:`candidates`, and extracts article pages
    into :class:`ArticleItem` via :meth:`generate`.
    """

    name = 'chinanews'

    allowed_domains = ['www.chinanews.com']

    # Matches article detail pages, e.g.
    # http://www.chinanews.com/gn/2016/05-15/1234567.shtml
    # Dots are escaped (the original pattern let them match any character).
    _ARTICLE_URL_RE = re.compile(
        r'^http://www\.chinanews\.com/[a-zA-Z]{2}/[0-9]{4}/[0-9]{2}-[0-9]{2}/[0-9]{3,9}\.shtml$')

    def __init__(self, date=None, topic='gn'):
        """Initialize the spider.

        :param date: start date as a 'YYYYMMDD' string; defaults to today.
            The original default was evaluated once at import time, which
            froze the date for the whole process lifetime -- computing it
            per call fixes that while staying backward-compatible.
        :param topic: chinanews topic code (e.g. 'gn'), used both in the
            scroll-page URL and as the item tag.
        """
        # NOTE(review): the original never called the parent __init__;
        # kept that way -- confirm ArticleSpider needs no extra setup.
        if date is None:
            date = datetime.datetime.now().strftime("%Y%m%d")
        self.topic = topic
        self.prefixURL = 'http://www.chinanews.com/scroll-news/' + self.topic
        self.currentDate = datetime.datetime.strptime(date, "%Y%m%d")
        self.suffixURL = 'news.shtml'
        # Set start_urls on the instance; the original assigned to the
        # CLASS attribute, leaking state across spider instances.
        self.start_urls = [
            self.prefixURL + "/" + self.currentDate.strftime("%Y/%m%d") + "/" + self.suffixURL]

    def candidates(self, response):
        """Return follow-up URLs for a scroll page.

        Article detail pages yield no candidates; a scroll page yields all
        linked articles plus the previous day's scroll page.
        """
        if self.category(response.url):
            return []
        # The scroll-page URL embeds its date as YYYY/MMDD.
        current = datetime.datetime.strptime(
            re.search(r'\d{4}/\d{4}', response.url).group(), "%Y/%m%d")
        results = response.xpath('//div[contains(@class,"dd_bt")]/a/@href').extract()
        # Append the previous day's scroll page so crawling walks backwards.
        results.append(self.prefixURL + "/" +
                       (current - datetime.timedelta(1)).strftime("%Y/%m%d") +
                       "/" + self.suffixURL
                       )
        return results

    def category(self, url):
        """Return True if *url* is an article detail page (not a scroll page)."""
        return self._ARTICLE_URL_RE.match(url) is not None

    def generate(self, response):
        """Build an ArticleItem from an article detail page.

        Extraction failures are logged and a partially-filled item is
        returned (best-effort), preserving the original behaviour.
        """
        item = ArticleItem()
        try:
            item['title'] = "".join(response.xpath('//h1/text()').extract()).strip()
            item['url'] = response.url
            item['site'] = self.name
            item['crawledAt'] = datetime.datetime.now()
            item['tag'] = self.topic
            item['content'] = "\n".join(response.xpath('//p/text()').extract())
            # First "left-time"/"left-t" text node carries date + source.
            tmp = response.xpath(
                '//div[contains(@class,"left-time")]/div[contains(@class,"left-t")]/text()').extract()[0]
            # Work on UTF-8 bytes so the byte-sequence splits below match the
            # ideographic space (U+3000 -> \xe3\x80\x80) and source separator.
            tmp = tmp.encode('UTF-8')
            dateStr = "".join(re.findall(r'\d', tmp.split("\xe3\x80\x80")[0]))
            item['publishAt'] = datetime.datetime.strptime(dateStr, '%Y%m%d%H%M')
            parts = tmp.split("\x9a")
            item['source'] = parts[-1] if len(parts) > 1 else None
            # Author sits after a colon; strip the trailing two characters.
            item['author'] = \
                response.xpath(
                    '//div[contains(@class,"left_name")]/div[contains(@class,"left_name")]/text()').extract()[
                    0].split(':')[-1][:-2]
        except Exception as e:
            # 'as' form works on Py2.6+ and Py3; e.message is deprecated.
            # Also fixes the "genrate" typo in the log message.
            self.logger.warning("Err in generate " + response.url + " , msg: " + str(e))
        return item
