# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from gov.items import GovItem
from gov.public import trans_time,generate_url,sub
from readability.readability import Document
from lxml import etree
import time
import re
from gov.settings import KEYWORDS,START,END

class BdSpider(CrawlSpider):
    """Baidu search spider.

    Generates one search URL per keyword (via ``generate_url``), follows the
    pager links on each results page, extracts title/url/source/date for every
    result, then fetches each target page and extracts its main text with
    readability + lxml.
    """
    name = 'bd'
    # allowed_domains = []

    # Follow pagination links inside the "#page" pager element.
    rules = (
        Rule(LinkExtractor(allow=r'pn=\d+&', restrict_xpaths='//*[@id="page"]//a'),
             callback='parse_item', follow=True),
    )

    def __init__(self, *args, **kwargs):
        """Accept an optional ``keywords`` spider argument.

        ``keywords`` is a comma-separated string (e.g. from
        ``scrapy crawl bd -a keywords=a,b``); when absent, falls back to the
        ``KEYWORDS`` setting, which may be either a comma-separated string or
        an iterable of strings.
        """
        super(BdSpider, self).__init__(*args, **kwargs)
        raw = kwargs.get('keywords', KEYWORDS)
        # Fix: the original unconditionally called .split(','), which raises
        # AttributeError when KEYWORDS is already a list/tuple.
        if isinstance(raw, str):
            self.keywords = [kw.strip() for kw in raw.split(',') if kw.strip()]
        else:
            self.keywords = list(raw)
        self.start_urls = [generate_url(kw, START, END) for kw in self.keywords]
        # Use the spider logger rather than bare print() so output respects
        # Scrapy's log configuration.
        self.logger.info('keywords=%s start_urls=%s', self.keywords, self.start_urls)

    def start_requests(self):
        """Seed the crawl with one request per generated search URL."""
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse_item)

    def parse_item(self, response):
        """Parse a search-results page.

        For each ``.result`` div, builds a partially-filled GovItem
        (title, url, source, date) and yields a request for the target page,
        passing the item along in ``meta`` for ``parse_content`` to finish.
        """
        self.logger.debug('parsing result page %s', response.url)
        for div in response.xpath('//*[@class="result"]'):
            item = GovItem()
            # Guard extract_first() with '' — it returns None when the node
            # is missing, and sub()/split() would crash on None.
            item['title'] = sub(
                div.xpath('string(./*[@class="c-title"]/a)').extract_first() or '')
            item['url'] = div.xpath('./*[@class="c-title"]/a/@href').extract_first()
            if not item['url']:
                # No target URL means nothing to fetch; skip this result
                # (the original passed None to scrapy.Request, which raises).
                continue
            # The author cell looks like "source\xa0\xa0date" or just "date".
            author = (div.xpath(
                'string(.//*[@class="c-author"])').extract_first() or ''
            ).split('\xa0\xa0')
            if len(author) == 2:
                item['source'] = sub(author[0])
                _, item['date'] = trans_time(
                    author[1].replace('\t', '').replace('\n', ''))
            else:
                item['source'] = ''
                _, item['date'] = trans_time(
                    author[0].replace('\t', '').replace('\n', ''))
            yield scrapy.Request(url=item['url'], meta={'item': item},
                                 callback=self.parse_content)

    def parse_content(self, response):
        """Extract the article body text and yield the completed item.

        Uses readability's Document.summary() to isolate the main content,
        then lxml to flatten it to plain text. Falls back to an empty
        ``content`` when extraction yields nothing.
        """
        item = response.meta['item']
        content = ''
        # NOTE: a Document instance is always truthy, so the original
        # `if document:` check was a no-op; the real hazard is etree.HTML
        # returning None on empty/whitespace-only markup — guard that instead.
        readable_article = Document(response.text).summary()
        if readable_article:
            html = etree.HTML(readable_article)
            if html is not None:
                content = sub(''.join(html.xpath('//*//text()'))
                              ).replace('&nbsp', '')
        item['content'] = content
        yield item