# coding=UTF-8
# author=yinkang
# created at 16/5/15
import datetime

import re

import scrapy

from crawler.items import ArticleItem
from crawler.spiders.ArticleSpider import ArticleSpider
from scrapy.http import Request


class IFengNewsSpider(ArticleSpider):
    """Spider for the real-time news lists on news.ifeng.com.

    Walks the paginated "rtlist" pages for a given day, follows the
    previous-day link once every page of the current day has been seen,
    and extracts title/content/date/source/author from article pages.
    """

    name = 'ifengnews'

    allowed_domains = ['news.ifeng.com']

    # Class-level counter of articles processed (reset in __init__).
    news_count = 0

    # Article-page URL shape, e.g. http://news.ifeng.com/a/20160531/48884482_0.shtml
    # Dots are escaped so "newsXifeng.com" no longer matches.
    _ARTICLE_URL_RE = re.compile(
        r'^http://news\.ifeng\.com/a/[0-9]{8}/[0-9]{8}_[0-9]\.shtml$')

    def __init__(self, date=None, topic='ifeng_news'):
        """
        :param date: start date as "YYYYMMDD". Defaults to *today at call
            time* — the original evaluated ``datetime.now()`` in the default
            expression, which froze the date at import time for long-lived
            processes.
        :param topic: tag stored on every crawled item.
        """
        # NOTE(review): the original never called the base __init__;
        # preserved to avoid changing ArticleSpider's setup order — confirm.
        if date is None:
            date = datetime.datetime.now().strftime("%Y%m%d")
        self.topic = topic
        self.prefixURL = 'http://news.ifeng.com/listpage/11502'
        self.currentDate = datetime.datetime.strptime(date, "%Y%m%d")
        self.suffixURL = 'rtlist.shtml'
        firstPage = "%s/%s/1/%s" % (
            self.prefixURL, self.currentDate.strftime("%Y%m%d"), self.suffixURL)
        IFengNewsSpider.start_urls = [firstPage]
        # URLs already scheduled, used to de-duplicate pagination links.
        self.parsedUrls = [firstPage]
        self.logger.debug("initial list pages: %s", self.parsedUrls)

        IFengNewsSpider.news_count = 0

    def candidates(self, response):
        """Return the URLs to crawl next from a listing page.

        Collects article links on the page plus any unseen pagination
        links; once all pages of the current day have been visited,
        follows the previous-day link instead.
        """
        self.logger.info("crawling list: %s", response.url)
        # Article links on this listing page.
        results = response.xpath(
            '//div[contains(@class,"newsList")]/ul/li/a/@href').extract()
        # Pagination links for the same day.
        pageUrls = response.xpath(
            '//div[contains(@class,"m_page")]/span/a/@href').extract()
        newPages = [url for url in pageUrls if url not in self.parsedUrls]
        if newPages:
            # Schedule the not-yet-seen pages of the current day.
            self.parsedUrls.extend(newPages)
            results.extend(newPages)
        else:
            # Every page of this day was seen: step back one day.
            self.parsedUrls = []
            backDayUrls = response.xpath(
                '//div[contains(@id,"backDay")]/a/@href').extract()
            if backDayUrls:
                self.parsedUrls.append(backDayUrls[0])
                results.append(backDayUrls[0])
        return results

    def category(self, url):
        """Return True when *url* is an article page (vs. a listing page)."""
        return self._ARTICLE_URL_RE.match(url) is not None

    def _first_text(self, response, xpaths):
        """Return the first string of the first xpath that extracts anything.

        Falls back to "" when none of the selectors match — the item
        fields keep the original "empty string when unknown" contract.
        """
        for xp in xpaths:
            values = response.xpath(xp).extract()
            if values:
                return values[0]
        return ""

    def generate(self, response):
        """Build an ArticleItem from an article page.

        Always returns an item — possibly only partially filled when a
        selector fails — so the pipeline never receives None, matching
        the original ``finally: return item`` contract (but without
        swallowing KeyboardInterrupt/SystemExit).
        """
        IFengNewsSpider.news_count += 1
        if IFengNewsSpider.news_count % 100 == 0:
            self.logger.info("processed %d articles", IFengNewsSpider.news_count)

        item = ArticleItem()
        try:
            titleArr = response.xpath('//h1/text()').extract()
            if not titleArr:
                titleArr = response.xpath('//title/text()').extract()
            item['title'] = titleArr[0]
            item['url'] = response.url
            item['site'] = self.name
            item['crawledAt'] = datetime.datetime.now()
            item['tag'] = self.topic

            # Page templates differ; try the known content layouts in order.
            contentXPaths = (
                '//div[contains(@class,"js_selection_area")]/p/text()',
                '//div[contains(@class,"wrapIphone AtxtType01")]/p/text()',
                '//div[contains(@class,"yc_con_txt")]/p/text()',
                '//p[contains(@class,"photoDesc")]/text()',
                '//div[contains(@class,"picDiv")]/p/text()',
                '//div[contains(@class,"yaow")]/p/text()',
                '//p[contains(@class,"text_con")]/text()',
            )
            cArr = []
            for xp in contentXPaths:
                cArr = response.xpath(xp).extract()
                if cArr:
                    break
            item['content'] = "\n".join(cArr)

            # Publish date formats vary too widely to parse reliably;
            # store the raw string (or "" when not found).
            item['publishAt'] = self._first_text(response, (
                '//span[contains(@itemprop,"datePublished")]/text()',
                '//span[contains(@class,"Arial")]/text()',
                '//div[contains(@class,"zuo_word fl")]/p[2]/text()',
            ))
            item['source'] = self._first_text(response, (
                '//span[contains(@itemprop,"publisher")]/a/text()',
            ))
            item['author'] = self._first_text(response, (
                '//span[contains(@itemprop,"author")]/span/text()',
            ))
        except Exception as e:
            # "genrate" typo fixed; str(e) replaces the removed e.message.
            self.logger.warning(
                "Err in generate %s , msg: %s", response.url, e)
        return item

