import logging
import re
import math
import time
import datetime

import demjson
from scrapy.spiders import Spider
from scrapy.http import Request
from items import NewsItem

"""
    新浪新闻爬虫V.2,调用API为  https://feed.mix.sina.com.cn/api/roll/get?pageid=153&lid=2509&k=&num=50&page=1&r=0.01986163759945847&callback=&_=1543220481946
"""
class SinaRollSpiderV2(Spider):
    """Sina rolling-news spider, v2.

    Walks the JSON roll API
    (https://feed.mix.sina.com.cn/api/roll/get?pageid=153&lid=2509&...)
    page by page, stops at the first entry published before yesterday
    00:00 local time, then fetches and parses each collected article.
    """

    name = "sinaRollSpiderV2"
    # Unix timestamp of yesterday 00:00 local time — the crawl cutoff:
    # any roll entry older than this ends the pagination loop.
    date = int(time.mktime(datetime.date.today().timetuple())) - 86400
    start_urls = [
        "https://feed.mix.sina.com.cn/api/roll/get?pageid=153&lid=2509&k=&num=50&page=1&r=0.01986163759945847&callback=&_=" + str(date) + "000"
    ]
    # Base URL for subsequent pages; parse() appends "&page=N".
    ajax_url = "https://feed.mix.sina.com.cn/api/roll/get?pageid=153&lid=2509&k=&num=50&r=0.01986163759945847&callback=&_=" + str(date) + "000"
    # Current roll page, incremented after each listing page is consumed.
    page = 1
    # Maps Sina channel ids ("lids") to Chinese category labels.
    titleMap = {
        "2510": "国内",
        "2511": "国际",
        "2669": "社会",
        "2512": "体育",
        "2513": "娱乐",
        "2514": "军事",
        "2515": "科技",
        "2516": "财经",
        "2517": "股市",
        "2518": "美股"
        }

    custom_settings = {
        "SPIDER_MIDDLEWARES": {"news.sinaMiddleware.SinaMiddleware": 543},
        "DOWNLOADER_MIDDLEWARES": {
            "news.sinaMiddleware.SinaDownloadMiddleware": 543,
            # NOTE(review): modern Scrapy spells this module
            # "downloadermiddlewares" (plural) — confirm against the
            # pinned Scrapy version before changing.
            "scrapy.downloadermiddleware.useragent.UserAgentMiddleware": None,
        },
        "LOG_FILE":"./logs/sina/"+time.strftime("%Y%m%d") + ".log",
        "LOG_LEVEL":"INFO"
    }
    # Paths of near-duplicate news articles (filled elsewhere; class-level,
    # so it is shared by all instances of this spider).
    repeats = list()

    # Extracts the JSON payload out of "var jsonData = {...};" responses.
    extract_pattern = re.compile(r"var jsonData = (.*);\Z")
    # Whitespace control characters stripped before matching.
    black_pattern = re.compile(r"[\r\n\t]")

    def parse(self, response):
        """Parse one page of the roll API.

        Yields one Request per article plus a Request for the next page.
        The roll is newest-first, so the first entry older than the
        cutoff means every relevant URL has already been collected.
        """
        data = demjson.decode(response.text, encoding="utf-8")
        for news in data["result"]["data"]:
            if int(news["ctime"]) < self.date:
                # Fix: logging.Logger.info() takes a single %-style format
                # string with lazy args, not print-style varargs (the old
                # call raised a string-formatting error inside logging).
                self.logger.info("已获取全部新闻路径，完成条件：[%s, %s]", news["ctime"], self.date)
                return
            item = {}
            item["title"] = news["title"]
            item["time"] = int(news["ctime"])
            # Default category when no lid is known.
            item["tag"] = "其他"
            for lid in news["lids"].split(","):
                if lid in self.titleMap:
                    item["tag"] = self.titleMap[lid]
            yield Request(news["url"], callback=self.get_news_content, meta=item)
        self.page += 1
        yield Request(self.ajax_url + "&page=" + str(self.page), callback=self.parse)

    def parse_js(self, response):
        """Decode a GBK "var jsonData = ...;" response body into a dict.

        Raises AttributeError when the body does not match
        ``extract_pattern`` (``match()`` returns None), preserving the
        original fail-fast behaviour.
        """
        text = response.body.decode("gbk")
        text = self.black_pattern.sub("", text)
        matched = self.extract_pattern.match(text)
        return demjson.decode(matched.group(1), encoding="utf-8")

    def get_news_content(self, response):
        """Extract an article body into a NewsItem.

        Walks the direct children of <div class="article">: image nodes
        become ``{"is_word": 0, "src", "descr"?}`` entries (the first one
        also becomes the cover); plain text nodes become
        ``{"is_word": 1, "para"}`` entries and are appended to ``text``.
        Scripts, comments, nested <div>s and <style> blocks are skipped.
        """
        item = NewsItem()
        item["title"] = response.meta["title"]
        item["tag"] = response.meta["tag"]
        item["time"] = response.meta["time"]
        item["from_url"] = response.url
        item["website"] = "sina"
        item["content"] = []
        item["text"] = ""
        item["cover"] = ""
        content_container = response.xpath("//div[@class='article']/child::node()")
        for c in content_container:
            news_content = {}
            # Image node: normalise protocol-relative src and capture the
            # optional <span> caption.
            temp = c.xpath("./child::img/@src").extract()
            if temp:
                news_content["is_word"] = 0
                if not temp[0].startswith(("https:", "http:")):
                    news_content["src"] = "https:" + temp[0]
                else:
                    news_content["src"] = temp[0]
                descr = c.xpath("./child::span/text()").extract()
                if descr:
                    news_content["descr"] = descr[0]
                item["content"].append(news_content)
                # The first image doubles as the article cover.
                if not item["cover"]:
                    item["cover"] = news_content["src"]
                continue
            # Skip nodes with an embedded <script> child.
            if c.xpath("./child::script"):
                continue
            # Skip <script> nodes themselves.
            if c.get().startswith('<script'):
                continue
            # Skip comments, nested <div>s, scripts and <style> blocks.
            # Fix: the original tested for "<scrpit" (typo), which could
            # never match a real <script> prefix.
            node = c.xpath(".").extract()
            if not node or node[0].startswith(("<!-", "<div", "<script", "<style")):
                continue
            # Plain text paragraph.
            temp = c.xpath("string(.)").extract()
            if not temp or not temp[0]:
                continue
            news_content["is_word"] = 1
            news_content["para"] = temp[0]
            item["text"] += temp[0]
            item["content"].append(news_content)
        return item

    def get_news_url(self, response):
        """Yield one article Request per entry of a parse_js() listing."""
        data = self.parse_js(response)
        for news in data["list"]:
            # Local renamed from "time" so it no longer shadows the
            # time module imported at file level.
            news_time = news["time"]
            yield Request(
                news["url"],
                callback=self.get_news_content,
                meta={"tag": news["channel"]["title"], "title": news["title"], "time": news_time},
            )


        


