# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from scrapy.exceptions import CloseSpider
from project_sina_miltary.items import SinaNews
import time
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
import json
import logging

class SinaNewsSpider(scrapy.Spider):
    """Crawl the Sina military-news roll (column ``zgjq``) into SinaNews items.

    The roll index is paginated as ``index_{n}.shtml`` (n >= 2; page 1 is
    ``start_urls[0]``).  Each listing page links to article pages that come
    in two layouts, handled by :meth:`content_parse2` (``doc-*`` URLs) and
    :meth:`content_parse1` (legacy URLs).
    """

    name = 'sinamil'
    #allowed_domains = ['sina.com']

    # First listing page; later pages are produced from _base_url.
    start_urls = ['http://roll.mil.news.sina.com.cn/col/zgjq/index.shtml']

    # Template for paginated listing pages (page numbering starts at 2).
    _base_url = 'http://roll.mil.news.sina.com.cn/col/zgjq/index_{0}.shtml'

    # Headers for the listing pages (mobile UA, Host pinned to the roll host).
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language' : 'zh-CN,zh;q=0.9',
        'Accept-Encoding' : 'gzip, deflate',
        'User-Agent' : 'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Mobile Safari/537.36',
        'Host' : 'roll.mil.news.sina.com.cn',
        'Cache-Control' : 'no-cache',
        'Connection' : 'keep-alive',
        'Cookie' : 'statuid=__121.49.107.4_1525785854_0.46490800; statuidsrc=PostmanRuntime%2F7.1.1%60121.49.107.4%60http%3A%2F%2Finterface.sina.cn%2Fwap_api%2Flayout_col.d.json%3Fshowcid%3D56261%26col%3D56261%26level%3D1%252C2%26show_num%3D30%26page%3D5%26act%3Dmore%26jsoncallback%3DcallbackFunction%26_%3D1525785831996%26callback%3DZepto1525785777740%60%60__121.49.107.4_1525785854_0.46490800; ustat=__121.49.107.4_1525785854_0.46490800; genTime=1525785854; vt=4; tuijian=usrmdinst_4',
        'Referer' : '',
    }

    custom_settings = {
        #'DEFAULT_REQUEST_HEADERS' : headers,
        'REDIRECT_ENABLED' : 'True',
        #'LOG_LEVEL' : 'WARNING',
    }

    # Endless batch generator of listing-page URLs; created lazily if missing.
    generator = None

    def __init__(self, category = None, *args, **kwargs):
        # ``category`` is accepted for CLI compatibility but currently unused.
        super(SinaNewsSpider, self).__init__(*args, **kwargs)
        self.generator = self.requestUrlsGen()

    # Generator of listing-page URL batches.
    def requestUrlsGen(self, num = 1):
        """Endlessly yield batches of ``num`` paginated listing URLs.

        Starts at page 2 because page 1 is ``start_urls[0]``.  A fresh list
        is yielded each round (the original reused one list and ``clear()``-ed
        it after the yield, which would empty a batch a consumer kept).
        """
        pageIndex = 2
        while True:
            batch = [self._base_url.format(p)
                     for p in range(pageIndex, pageIndex + num)]
            pageIndex += num
            # Lazy %-style args: message text kept from the original.
            logging.info('length of temp %d PageIndex %s', len(batch), pageIndex)
            for url in batch:
                logging.info(url)
            yield batch

    def start_requests(self):
        """Scrapy entry point.

        The original misspelled this as ``start_request`` so it was never
        called; Scrapy's default implementation fetched ``start_urls``
        without the custom headers or errback.  This restores the intended
        hook while still starting the crawl from ``start_urls``.
        """
        if self.generator is None:
            self.generator = self.requestUrlsGen()

        for url in self.start_urls:
            yield scrapy.Request(
                method = 'GET',
                url = url,
                callback = self.parse,
                errback = self.err_callback,
                headers = self.headers,
                )

    def parse(self, response):
        """Parse one listing page: queue article requests, then the next batch."""
        # response.status is an int; the original compared against the
        # string '404', which was always False and never closed the spider.
        if response.status == 404:
            raise CloseSpider('404')

        newslist = response.xpath('//ul[@class="linkNews"]/li')

        # Desktop-UA headers for article pages (served from a different host,
        # so the listing 'Host' header must not be reused here).
        headers = {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                'Accept-Language' : 'zh-CN,zh;q=0.9',
                'Accept-Encoding' : 'gzip, deflate',
                'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
            }

        for item in newslist:
            news = SinaNews()

            news['title'] = item.xpath('a/text()').extract_first(default = '')

            # extract_first avoids indexing an empty result list.
            origin = item.xpath('a/@href').extract_first()
            if origin:
                news['origin'] = origin

                self.logger.info('news uri: ' + origin)

                request = scrapy.Request(origin,
                        callback = self.getContent_callback,
                        headers = headers)
                request.meta['item'] = news
                yield request

            #news['time'] = item.xpath('span/text()').extract()[0]  #timestamp

        # Queue the next batch of listing pages.
        if self.generator is None:
            self.generator = self.requestUrlsGen()

        for url in next(self.generator):
            yield scrapy.Request(
                method = 'GET',
                url = url,
                callback = self.parse,
                errback = self.err_callback,
                headers = headers,
                )

    def getContent_callback(self, response):
        """Dispatch an article response to the parser matching its layout.

        The original duplicated both parser bodies inline; delegating keeps
        one copy of each layout's extraction logic.
        """
        self.logger.info('getContent_callback')
        item = response.meta['item']

        # 'doc-*' URLs use the new page layout, everything else the legacy one.
        if 'doc' in item['origin']:
            self.logger.info('parse2')
            yield from self.content_parse2(response)
        else:
            self.logger.info('parse1')
            yield from self.content_parse1(response)

    # http://mil.news.sina.com.cn/2011-03-31/1342640379.html
    def content_parse1(self, response):
        """Extract title/time/source/content from a legacy-layout article page."""
        item = response.meta['item']

        # extract_first(default='') instead of extract()[0]: a layout
        # mismatch yields an empty field rather than an IndexError.
        item['title'] = response.xpath('//h1[@id="artibodyTitle"]/text()').extract_first(default = '')

        self.logger.info(item['title'])

        item['time'] = response.xpath('//span[@id="pub_date"]/text()').extract_first(default = '')

        item['source'] = response.xpath('//span[@id="media_name"]/text()').extract_first(default = '')

        item['content'] = response.xpath('normalize-space(//div[@class="blkContainerSblkCon" and @id="artibody"])').extract_first(default = '')

        yield item

    # http://mil.news.sina.com.cn/china/2018-05-06/doc-ihacuuvt8370960.shtml
    def content_parse2(self, response):
        """Extract title/source/content/time from a new-layout article page."""
        item = response.meta['item']

        item['title'] = response.xpath('//h1[@class="main-title"]/text()').extract_first(default = '')
        self.logger.info(item['title'])

        item['source'] = response.xpath('//div[@class="date-source"]/a/text()').extract_first(default = '')

        item['content'] = response.xpath('normalize-space(//div[@class="article" and @id="article"])').extract_first(default = '')

        #item['author'] =

        item['time'] = response.xpath('//span[@class="date"]/text()').extract_first(default = '')

        yield item

    def err_callback(self, failure):
        """Log request failures by category (errback for all listing requests)."""
        self.logger.error(repr(failure))

        if failure.check(HttpError):
            # these exceptions come from HttpError spider middleware
            # you can get the non-200 response
            response = failure.value.response
            self.logger.error('HttpError on %s', response.url)

        elif failure.check(DNSLookupError):
            # this is the original request
            request = failure.request
            self.logger.error('DNSLookupError on %s', request.url)

        elif failure.check(TimeoutError, TCPTimedOutError):
            request = failure.request
            self.logger.error('TimeoutError on %s', request.url)