# -*- coding: utf-8 -*-
import  datetime, re
from urllib.parse import urljoin
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import EpaperItem


class JjrbSpider(scrapy.Spider):
    """Spider for the 经济日报 (Economic Daily) e-paper at paper.ce.cn.

    Crawls one issue per day between ``start_date`` and ``end_date``
    (inclusive; both are 'YYYY-MM-DD' strings, each defaulting to today),
    walking the per-day layout index -> layout pages -> article pages,
    and yields one ``EpaperItem`` per article.
    """

    name = 'jjrb'
    allowed_domains = ['ce.cn']
    # NOTE(review): "profix" is a misspelling of "prefix", but the attribute
    # name is kept unchanged for backward compatibility.
    url_profix = 'http://paper.ce.cn/jjrb/html/'
    node_html = 'node_2.htm'

    # Patterns compiled once at class creation instead of per response.
    _ARTICLE_HTML_RE = re.compile(r'content_\d{1,10}\.htm')   # dot escaped (was unescaped)
    _DATE_RE = re.compile(r'\d{4}-\d{2}/\d{2}')               # YYYY-MM/DD inside the URL
    _ARTICLE_ID_RE = re.compile(r'_(\d{1,10})\.')             # digits in '..._NNN.htm'
    _LAYOUT_RE = re.compile(r'[^\x00-\xff]\d{2}[^\x00-\xff]{2}<STRONG>.*</STRONG>')
    # Non-greedy character class fixes the original greedy '.*', which could
    # swallow everything between two IMG tags on a single line.
    _IMG_SRC_RE = re.compile(r'<IMG src="([^"]*attpic[^"]*)"')

    def __init__(self, start_date=None, end_date=None, *args, **kwargs):
        """Parse optional 'YYYY-MM-DD' bounds; each defaults to today.

        Raises ValueError (from strptime) on a malformed date string, which
        surfaces bad command-line input early.
        """
        super().__init__(*args, **kwargs)
        # 'is None' (not '== None') per PEP 8.
        if start_date is None:
            self.start_date = datetime.date.today()
        else:
            self.start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d").date()
        if end_date is None:
            self.end_date = datetime.date.today()
        else:
            self.end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d").date()

    def start_requests(self):
        """Yield one request per day for that day's layout index page."""
        day = self.start_date
        one_day = datetime.timedelta(days=1)
        while day <= self.end_date:
            url = self.url_profix + day.strftime("%Y-%m/%d") + '/' + self.node_html
            # Lazy '%s' args keep the emitted message identical to the
            # original concatenation while avoiding eager string building.
            self.logger.debug('待抓取版面列表%s', url)
            yield scrapy.Request(url, self.parse_item)
            day += one_day

    # Scrape the list of layout (page) links for one day's issue.
    def parse_item(self, response):
        """Follow every layout link ('pageLink' anchors) on the index page."""
        for href in response.xpath('//a[@id="pageLink"]/@href').getall():
            # urljoin resolves relative hrefs like './nbs...htm' against the
            # page URL.  The original rstrip/lstrip calls treat their argument
            # as a character *set* and could corrupt URLs.
            url = urljoin(response.url, href)
            self.logger.debug('待抓取新闻列表页%s', url)
            yield scrapy.Request(url, callback=self.parse_layout)

    # Scrape the article links from one layout page.
    def parse_layout(self, response):
        """Follow every 'content_NNN.htm' article link found in the page HTML."""
        for html_name in self._ARTICLE_HTML_RE.findall(response.text):
            url = urljoin(response.url, html_name)
            self.logger.debug('待抓取文章列表页%s', url)
            yield scrapy.Request(url, callback=self.parse_article)

    # Scrape one article page into an item.
    def parse_article(self, response):
        """Extract metadata, title, author, layout label and body HTML."""
        item = EpaperItem()
        url = response.url
        html_text = response.text
        item['url'] = url
        item['papername'] = '经济日报'
        item['dirname'] = 'data/jjrb'
        # Issue date is embedded in the URL as YYYY-MM/DD -> stored as YYYYMMDD.
        date_match = self._DATE_RE.search(url)
        item['date'] = date_match.group().replace('-', '').replace('/', '') if date_match else ''
        # Article id is the digit run in '..._NNN.htm'; '' when absent
        # (matches the original join-then-strip behaviour on no match).
        id_match = self._ARTICLE_ID_RE.search(url)
        item['article_id'] = id_match.group(1) if id_match else ''
        item['title'] = response.xpath('//*[@class="font01"]/text()').get()
        item['profix'] = ''
        author = ''
        subtitle = ''
        # A 'font02' element carries either the subtitle (when it has an
        # inline style attribute) or the author line.
        for font02 in response.xpath('//*[@class="font02"]').getall():
            text = ''.join(re.findall(r'>(.*?)<', font02))
            if 'style' in font02:
                subtitle = text
            else:
                author = text
        item['subtitle'] = subtitle
        item['author'] = author
        # Layout label, e.g. '第01版：要闻', wrapped in <STRONG> tags.
        layout = ''.join(self._LAYOUT_RE.findall(html_text))
        item['layout'] = layout.replace('<STRONG>', '').replace('</STRONG>', '')
        item['content'] = ''.join(response.xpath('//*[@id="ozoom"]').getall())
        item['images'] = self.parse_image(url, html_text)
        return item

    def parse_image(self, url, html_text):
        """Return {absolute_image_url: ''} for every 'attpic' <IMG> in the page.

        The capture group yields the src value directly, replacing the
        original replace-prefix/replace-suffix string surgery.
        """
        return {urljoin(url, src): '' for src in self._IMG_SRC_RE.findall(html_text)}