import logging
import re
import urllib
import demjson

from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from scrapy.spiders import CrawlSpider, Request, Rule

from items import NewsItem


class TencentSpider(CrawlSpider):
    """Crawl spider for Tencent news (news.qq.com / new.qq.com).

    Three article layouts are handled:
      * legacy pages:    /a/YYYYMMDD/NNNNNN.htm    -> parse_news1
      * "omn" pages:     /omn/YYYYMMDD/<id>.html   -> parse_news2
      * "omn" ajax ids:  /omn/.../<16-19 char id>  -> parse_news3, which
        fetches the article body from the openapi JSONP endpoint and
        hands the response to extract_html.

    Every parser produces a NewsItem with the same shape: website / time /
    news_id / from_url / title, plus ``content`` (a list of dicts, either
    {"is_word": 1, "para": <text>} or {"is_word": 0, "src": <img url>}) and
    ``text`` (all text paragraphs concatenated).
    """

    name = "tencentSpider"

    # ------------- URLs kept for manual debugging -----------#
    # handle_httpstatus_list = [301, 302]
    # start_urls=["http://news.qq.com/a/20170912/076916.htm"] # 301/302 test
    # start_urls=["https://news.qq.com/a/20160730/000170.htm"] # regex-replace test
    # start_urls=["https://news.qq.com/a/20160418/023091.htm"] # js embedded in <p>
    # start_urls=["https://new.qq.com/omn/20180912/20180912A0YOFE.html"] # multiple <strong>
    # start_urls=["https://new.qq.com/omn/20180920/20180920A0LWL400"] # ajax test

    start_urls = ["https://news.qq.com"]
    custom_settings = {
        "SPIDER_MIDDLEWARES": {"news.tencentMiddleware.TencentMiddleware": 543},
        "DOWNLOADER_MIDDLEWARES": {
            "news.tencentMiddleware.TencentDownloadMiddleware": 543,
            # BUGFIX: the module is "downloadermiddlewares" (plural) in the
            # Scrapy versions that provide scrapy.linkextractors (imported at
            # the top of this file); the old singular key never matched the
            # built-in middleware, so it was never actually disabled.
            "scrapy.downloadermiddlewares.useragent.UserAgentMiddleware": None,
        },
    }
    # allowed_domains  = ["news.qq.com", 'new.qq.com']

    url_pattern1 = re.compile(r"(.*)/a/(\d{8})/(\d+)\.htm")
    url_pattern2 = re.compile(r'(.*)/omn/(\d{8})/(.+)\.html')
    url_pattern3 = re.compile(r'(.*)/omn/.*([A-Z0-9]{16,19})')
    extract_pattern = re.compile(r"getNewsContentOnlyOutput\((.+)\)")
    rules = (
        # allow=(p,) — all three rules now consistently pass a tuple.
        Rule(LinkExtractor(allow=(url_pattern1,)), callback="parse_news1", follow=True),
        Rule(LinkExtractor(allow=(url_pattern2,)), callback="parse_news2", follow=True),
        Rule(LinkExtractor(allow=(url_pattern3,)), callback="parse_news3", follow=True),
    )

    def _fill_paragraphs(self, item, paragraphs):
        """Populate item["content"] / item["text"] from <p> selector nodes.

        Shared by parse_news1 and parse_news2 (the loop used to be
        copy-pasted in both). Each node becomes either an image entry or a
        text entry; paragraphs that merely wrap a <script> tag are dropped.
        Returns the item for convenient `return self._fill_paragraphs(...)`.
        """
        for node in paragraphs:
            # Image paragraph?
            srcs = node.xpath("./child::img/@src").extract()
            if srcs:
                src = srcs[0]
                # BUGFIX: the original condition used `or` between two
                # negated startswith checks, which is always true, so
                # "https:" was prepended even to absolute URLs (yielding
                # "https:https://..."). Only scheme-less (protocol-relative)
                # sources need the prefix.
                if not src.startswith(("http:", "https:")):
                    src = "https:" + src
                item["content"].append({"is_word": 0, "src": src})
                continue
            # Skip paragraphs that only embed a <script> block.
            if node.xpath("./child::script"):
                continue
            # Plain-text paragraph; skip empty extractions.
            text = node.xpath("string(.)").extract()
            if not text or not text[0]:
                continue
            item["text"] += text[0]
            item["content"].append({"is_word": 1, "para": text[0]})
        return item

    def parse_news1(self, response):
        """Parse a legacy article page (/a/YYYYMMDD/NNNNNN.htm)."""
        selector = Selector(response)
        match = self.url_pattern1.match(response.url)
        item = NewsItem()
        item["website"] = "tencent"
        item["time"] = match.group(2)  # YYYYMMDD, kept as a string here
        item["news_id"] = match.group(2) + match.group(3)
        item["from_url"] = response.url
        item["title"] = selector.xpath("//h1/text()").extract()[0]
        item["content"] = []
        item["text"] = ""
        container = selector.xpath("//div[@id='Cnt-Main-Article-QQ']/child::p")
        return self._fill_paragraphs(item, container)

    def parse_news2(self, response):
        """Parse an "omn" article page (/omn/YYYYMMDD/<id>.html)."""
        selector = Selector(response)
        match = self.url_pattern2.match(str(response.url))
        item = NewsItem()
        item["website"] = "tencent"
        # NOTE(review): this parser stores the date as int while parse_news1
        # and extract_html store strings; downstream consumers may depend on
        # it, so the inconsistency is preserved — confirm before unifying.
        item["time"] = int(match.group(2))
        item["news_id"] = match.group(3)
        item["from_url"] = response.url
        item["title"] = selector.xpath("//div[@class='LEFT']/h1/text()").extract()[0]
        item["content"] = []
        item["text"] = ""
        content = selector.xpath("//p[@class='one-p']")
        return self._fill_paragraphs(item, content)

    def parse_news3(self, response):
        """Ajax-loaded article: request its body from the openapi endpoint.

        The JSONP response is parsed by extract_html.
        """
        match = self.url_pattern3.match(response.url)
        news_id = match.group(2)
        api_url = (
            "https://openapi.inews.qq.com/getQQNewsNormalContent?id="
            + news_id
            + "&chlid=news_rss&refer=mobilewwwqqcom&otype=jsonp"
            + "&ext_data=all&srcfrom=newsapp&callback=getNewsContentOnlyOutput"
        )
        yield Request(api_url, callback=self.extract_html)

    def extract_html(self, response):
        """Build a NewsItem from the JSONP payload fetched by parse_news3."""
        match = self.extract_pattern.match(response.text)
        if match is None:
            # Unexpected payload (no JSONP callback wrapper): skip this
            # response instead of crashing on match.group(1).
            return None
        data = demjson.decode(match.group(1), encoding="utf-8")
        item = NewsItem()
        item["website"] = "tencent"
        item["title"] = data["title"]
        item["news_id"] = data["id"]
        item["time"] = data["id"][:8]  # article ids start with YYYYMMDD
        item["from_url"] = data["url"]
        item["content"] = []
        item["text"] = ""
        for piece in data["content"]:
            if piece["type"] == 1:  # text paragraph
                item["text"] += piece["value"]
                item["content"].append({"is_word": 1, "para": piece["value"]})
            elif piece["type"] == 2:  # image
                item["content"].append({"is_word": 0, "src": piece["value"]})
            # BUGFIX: any other type used to append an empty dict to
            # item["content"]; unknown types are now ignored.
        return item
