
import re
import time

import demjson
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy.spiders import Spider

from items import NewsItem


class TencentRollSpider(Spider):
    """Spider for Tencent's rolling-news feed (roll.news.qq.com).

    The roll endpoint returns JSONP of the form ``rollback({...})``.
    ``parse`` reads the total page count for each section's listing,
    ``get_news_urls`` walks one listing page and yields a request per
    article, and ``get_news_content`` extracts the article body into a
    ``NewsItem``.
    """

    name = "tencentRollSpider"
    # Listings are requested for the date five days before the crawl.
    date = time.strftime('%Y-%m-%d', time.localtime(time.time() - 86400 * 5))
    tags = ["news", "ent", "sports", "finance", "tech", "games", "auto", "edu", "house"]
    # One page-1 seed URL per crawled section; ``parse`` discovers the rest
    # of the pages from the reported page count.  Built with map/str.format
    # because a class-body list comprehension cannot reference class
    # attributes such as ``date`` (comprehensions get their own scope).
    start_urls = list(map(
        ("http://roll.news.qq.com/interface/cpcroll.php?callback=rollback"
         "&site={}&mode=1&cata=&date=" + date
         + "&_=" + str(int(time.time() - 86400)) + "&page=1").format,
        ["news", "ent", "sports", "finance", "tech", "games", "edu"],
    ))
    custom_settings = {
        "SPIDER_MIDDLEWARES": {"news.tencentMiddleware.TencentMiddleware": 543},
        "DOWNLOADER_MIDDLEWARES": {
            "news.tencentMiddleware.TencentDownloadMiddleware": 543,
            # Disable the built-in user-agent middleware.  The module is
            # "downloadermiddlewares" (plural) in Scrapy >= 1.0; the old
            # singular path matched nothing, so the middleware was never
            # actually disabled.
            "scrapy.downloadermiddlewares.useragent.UserAgentMiddleware": None,
        },
        "LOG_FILE": "./logs/tencent/" + time.strftime("%Y%m%d") + ".log",
        "LOG_LEVEL": "INFO",
    }

    ajax_url = ("http://roll.news.qq.com/interface/cpcroll.php?callback=rollback"
                "&site=news&mode=1&cata=&date=" + date)
    # Paths of articles with extremely high similarity (near-duplicates).
    # Shared class-level list — presumably populated by the middleware
    # configured above; verify against news.tencentMiddleware.
    repeats = list()

    # Strips the JSONP wrapper: rollback( ... )
    extract_pattern = re.compile(r"rollback\((.+)\)")

    def parse(self, response):
        """Read the page count from a page-1 listing and request every page.

        Yields one ``Request`` per listing page, handled by
        ``get_news_urls``.  Bails out silently when the endpoint reports a
        non-zero response code.
        """
        data = self.parse_js(response)
        if data["response"]["code"] != "0":
            return
        for page in range(1, data["data"]["count"] + 1):
            # Start URLs end with "page=1"; replace the trailing digit with
            # the target page number.
            url = response.url[:-1] + str(page)
            yield Request(url, callback=self.get_news_urls, dont_filter=True)

    def parse_js(self, response):
        """Decode the JSONP body (``rollback({...})``) into a dict.

        Raises AttributeError if the body does not match the wrapper
        pattern (no match object to call ``group`` on).
        """
        matched = self.extract_pattern.match(response.text)
        return demjson.decode(matched.group(1), encoding="utf-8")

    def get_news_content(self, response):
        """Extract one article page into a ``NewsItem``.

        ``content`` is a list of paragraph dicts: images get
        ``{"is_word": 0, "src": ...}``, text gets
        ``{"is_word": 1, "para": ...}``.  ``text`` is the concatenated
        plain text and ``cover`` is the first image URL, if any.
        """
        item = NewsItem()
        item["title"] = response.meta["title"]
        item["tag"] = response.meta["tag"]
        item["time"] = response.meta["time"]
        item["from_url"] = response.url
        item["website"] = "tencent"
        item["content"] = []
        item["text"] = ""
        item["cover"] = ""
        for para in response.xpath("//div[@id='Cnt-Main-Article-QQ']/child::p"):
            news_content = {}
            # Image paragraph?
            src = para.xpath("./child::img/@src").extract()
            if src:
                news_content["is_word"] = 0
                # Prepend "https:" to schemeless (protocol-relative) URLs.
                # BUG FIX: the original used "or" between the two negated
                # startswith checks, which is always true, so absolute URLs
                # were mangled to "https:http://...".
                if not src[0].startswith(("https:", "http:")):
                    news_content["src"] = "https:" + src[0]
                else:
                    news_content["src"] = src[0]
                item["content"].append(news_content)
                # The first image becomes the article cover.
                if not item["cover"]:
                    item["cover"] = news_content["src"]
                continue
            # Skip paragraphs that only embed a <script>.
            if para.xpath("./child::script"):
                continue
            # Plain-text paragraph; skip empty extractions.
            text = para.xpath("string(.)").extract()
            if not text or not text[0]:
                continue
            news_content["is_word"] = 1
            news_content["para"] = text[0]
            item["text"] += news_content["para"]
            item["content"].append(news_content)
        return item

    def get_news_urls(self, response):
        """Yield one article request per entry in a listing page.

        The article's title, column tag and publication time (as a Unix
        timestamp) travel in ``meta`` to ``get_news_content``.
        """
        data = self.parse_js(response)
        for news in data["data"]["article_info"]:
            time_array = time.strptime(news["time"], "%Y-%m-%d %H:%M:%S")
            news_time = int(time.mktime(time_array))
            yield Request(
                news["url"],
                callback=self.get_news_content,
                meta={"time": news_time, "title": news["title"], "tag": news["column"]},
            )

