# -*- coding: utf-8 -*-
import  datetime, re
from urllib.parse import urljoin
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import EpaperItem


class StdailySpider(scrapy.Spider):
    """Spider for the Science and Technology Daily (科技日报) e-paper.

    Crawls one issue per day in the inclusive range ``start_date`` ..
    ``end_date`` (``YYYY-MM-DD`` strings; each defaults to today), walking
    issue node page -> layout pages -> article pages, and yields one
    :class:`EpaperItem` per article.
    """

    name = 'stdaily'
    allowed_domains = ['stdaily.com']
    # NOTE(review): 'profix' looks like a typo for 'prefix', but it is part
    # of the public attribute surface (and mirrored by the item field), so
    # it is kept unchanged.
    url_profix = 'http://digitalpaper.stdaily.com/http_www.kjrb.com/kjrb/html/'
    node_html = 'node_2.htm'
    paper_name = '科技日报'
    dir_name = 'data/stdaily'

    def __init__(self, start_date=None, end_date=None, *args, **kwargs):
        """Parse the date-range arguments once, failing fast on bad input.

        :param start_date: first issue date as ``YYYY-MM-DD``; today if None.
        :param end_date: last issue date as ``YYYY-MM-DD``; today if None.
        """
        super(StdailySpider, self).__init__(*args, **kwargs)
        # PEP 8: compare against None with ``is``, not ``==``.
        if start_date is None:
            self.start_date = datetime.date.today()
        else:
            self.start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d").date()
        if end_date is None:
            self.end_date = datetime.date.today()
        else:
            self.end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d").date()

    def start_requests(self):
        """Yield one request per day for that issue's front node page."""
        day = self.start_date
        one_day = datetime.timedelta(days=1)
        while day <= self.end_date:
            # Issue URLs embed the date as YYYY-MM/DD.
            url = self.url_profix + day.strftime("%Y-%m/%d") + '/' + self.node_html
            day += one_day
            # Lazy %-formatting: the string is only built if DEBUG is enabled.
            self.logger.debug('待抓取版面列表%s', url)
            yield scrapy.Request(url, self.parse_item)

    # Crawl the list of layout (版面) pages of one issue.
    def parse_item(self, response):
        """Follow every layout page linked from the issue's node page."""
        for href in response.xpath('//a[@id="pageLink"]/@href').getall():
            # BUG FIX: the original used str.rstrip/str.lstrip with
            # multi-character arguments, which strip *character sets*, not
            # suffixes/prefixes, and can corrupt the URL. urljoin resolves
            # relative hrefs such as './node_3.htm' correctly.
            url = urljoin(response.url, href)
            self.logger.debug('待抓取新闻列表页%s', url)
            yield scrapy.Request(url, callback=self.parse_layout)

    # Crawl the article list of one layout page.
    def parse_layout(self, response):
        """Follow every article linked from a layout page."""
        for href in response.xpath('//div[@class="title"]/ul/li/a/@href').getall():
            # BUG FIX: replaces rurl.rstrip(rurl.split('/')[-1]), which
            # stripped a character set rather than the trailing file name.
            url = urljoin(response.url, href)
            self.logger.debug('待抓取文章列表页%s', url)
            yield scrapy.Request(url, callback=self.parse_article)

    # Scrape the article content.
    def parse_article(self, response):
        """Extract one article page into an :class:`EpaperItem`."""
        item = EpaperItem()
        url = response.url
        item['url'] = url
        item['papername'] = self.paper_name
        item['dirname'] = self.dir_name
        # The issue date is embedded in the URL path as YYYY-MM/DD;
        # store it flattened as YYYYMMDD ('' when absent).
        date_match = re.search(r'(\d{4})-(\d{2})/(\d{2})', url)
        item['date'] = ''.join(date_match.groups()) if date_match else ''
        # The article id is the digit run in '..._<id>.htm'; a capture
        # group replaces the fragile lstrip('_').rstrip('.') of the original.
        id_match = re.search(r'_(\d{1,10})\.', url)
        item['article_id'] = id_match.group(1) if id_match else ''
        item['title'] = response.xpath('//div[@class="biaoti"]/text()').get()
        item['profix'] = response.xpath('//div[@class="yinti"]/text()').get()
        item['subtitle'] = response.xpath('//div[@class="futi"]/text()').get()
        item['author'] = response.xpath('//div[@class="author"]/text()').get()
        item['layout'] = response.xpath('//div[@class="banci"]/text()').get()
        item['content'] = ''.join(response.xpath('//div[@id="ozoom"]/p').getall())
        imgs = response.xpath('//div[@class="picture"]/table/tbody/tr/td/img/@src').getall()
        item['images'] = self.parse_image(url, imgs)
        return item

    def parse_image(self, url, imgs):
        """Map each image src, resolved against *url*, to an empty string.

        :param url: article page URL the relative srcs resolve against.
        :param imgs: iterable of ``src`` attribute values.
        :return: dict of ``{absolute_image_url: ''}``.
        """
        images = {}
        for src in imgs:
            image_url = urljoin(url, src)
            self.logger.debug(image_url)
            images[image_url] = ''
        return images