# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from gov.items import GovItem
from gov.public import trans_time,sub
from readability.readability import Document
from lxml import etree
import time
import re
from gov.settings import KEYWORDS,START,END
class SgSpider(CrawlSpider):
    """Crawl Sogou news search results for the configured keywords.

    Seeds one search URL per keyword, follows pagination links via the
    CrawlSpider rule, parses each result entry on the listing pages, then
    fetches every article page and extracts the readable body text with
    readability + lxml.
    """
    name = 'sg'
    allowed_domains = []  # empty: follow links to any domain

    rules = (
        # Follow pagination links ("&page=N") and parse each listing page.
        Rule(LinkExtractor(allow=r'&page=\d+'), callback='parse_item', follow=True),
    )

    def __init__(self, *args, **kwargs):
        """Build start URLs from keywords.

        Keywords may be supplied on the command line
        (``scrapy crawl sg -a keywords=a,b``); otherwise settings.KEYWORDS
        is used.  NOTE(review): assumes KEYWORDS is a comma-separated
        string — confirm against gov.settings.
        """
        super(SgSpider, self).__init__(*args, **kwargs)
        self.keywords = kwargs.get('keywords', KEYWORDS).split(',')
        self.logger.info('keywords: %s', self.keywords)
        self.start_urls = ['https://news.sogou.com/news?mode=1&manual=&query={}&time=0&sort=1&page=1&w=03009900&dr=1&_asf=news.sogou.com&_ast=1545903232'.format(kw) for kw in self.keywords]
        self.logger.info('start_urls: %s', self.start_urls)

    def start_requests(self):
        """Issue the seed requests, routing them straight to parse_item."""
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse_item)

    def parse_item(self, response):
        """Parse one search-results page; yield a Request per article.

        Each result wrapper yields a GovItem (title/url/source/date) that is
        passed via meta to parse_content, which fills in the body text.
        """
        # The last .vrwrap div is not a result entry — drop it.
        results = response.xpath('//div[@id="main"]//div[@class="results"]/div[@class="vrwrap"]')[:-1]
        for wrap in results:
            item = GovItem()
            links = wrap.xpath('.//a[@id]')
            if not links:
                # Malformed entry without a headline link — skip instead of
                # letting IndexError abort the whole page.
                continue
            a = links[0]
            item['title'] = ''.join(a.xpath('.//text()').extract())
            item['url'] = a.xpath('./@href').extract_first()
            news_from = wrap.xpath('.//p[@class="news-from"]/text()').extract_first()
            # Expected shape: "source\xa0date"; guard against a missing node
            # (None) or a missing separator so one bad entry cannot crash.
            parts = news_from.split('\xa0') if news_from else []
            item['source'] = parts[0] if parts else ''
            if len(parts) > 1:
                _, item['date'] = trans_time(parts[1])
            else:
                item['date'] = ''
            if not item['url']:
                # scrapy.Request(url=None) raises ValueError — skip instead.
                self.logger.warning('result without href skipped: %s', item['title'])
                continue
            yield scrapy.Request(url=item['url'], meta={'item': item},
                                 callback=self.parse_content)

    def parse_content(self, response):
        """Extract the readable article body and complete the item."""
        item = response.meta['item']
        document = Document(response.text)
        content = ''
        if document:
            readable_article = document.summary()
            if readable_article:
                html = etree.HTML(readable_article)
                # Join all text nodes, clean with the project's `sub` helper,
                # and strip stray (unterminated) "&nbsp" entity remnants.
                content = sub(''.join(html.xpath('//*//text()'))).replace('&nbsp', '')
        item['content'] = content
        yield item