# -*- coding: utf-8 -*-
import  datetime, re
from urllib.parse import urljoin
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import EpaperItem


class MrdxSpider(scrapy.Spider):
    """Spider for 新华每日电讯 (Xinhua Daily Telegraph) e-paper articles.

    Iterates over the daily front-layout pages between ``start_date`` and
    ``end_date`` (inclusive), follows every article link found on each
    layout page, and yields one ``EpaperItem`` per article.
    """

    name = 'mrdx'
    allowed_domains = ['mrdx.cn']
    url_profix = 'http://mrdx.cn/content/'
    node_html = 'Page01DK.htm'
    # 2019-09-30: the site switched to the current page style.
    node_html_old = 'Page01HO.htm'

    def __init__(self, start_date=None, end_date=None, *args, **kwargs):
        """Accept optional ISO dates (``YYYY-MM-DD``); both default to today.

        Raises ValueError when a supplied date string does not match
        the ``%Y-%m-%d`` format.
        """
        super(MrdxSpider, self).__init__(*args, **kwargs)
        # `is None` identity test instead of `== None` (PEP 8).
        if start_date is None:
            self.start_date = datetime.date.today()
        else:
            self.start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d").date()
        if end_date is None:
            self.end_date = datetime.date.today()
        else:
            self.end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d").date()

    def start_requests(self):
        """Yield one request per day for that day's front-layout page."""
        d = self.start_date
        delta = datetime.timedelta(days=1)
        while d <= self.end_date:
            day = d.strftime("%Y%m%d")
            d += delta
            url = self.url_profix + day + '/' + self.node_html
            self.logger.debug('待抓取每日列表' + url)
            yield scrapy.Request(url, self.parse_layout)

    # Crawl the article list on a layout page.
    def parse_layout(self, response):
        """Follow every article link found on a daily layout page."""
        rurl = response.url
        for htmlname in response.xpath('//*/a').getall():
            # "Articel" is the site's own (misspelled) URL scheme — keep it.
            # The dot before "htm" is escaped; the original pattern let it
            # match any character.
            tmp_url = re.search(r'Articel\d{1,10}[A-Z]{2}\.htm', htmlname)
            if tmp_url is not None:
                # urljoin resolves the relative article name against the
                # layout-page URL.  The previous rstrip(...) call stripped a
                # character SET, not a suffix, and only worked by accident.
                url = urljoin(rurl, tmp_url.group())
                self.logger.debug('待抓取文章列表页' + url)
                yield scrapy.Request(url, callback=self.parse_article)

    # Scrape the article content.
    def parse_article(self, response):
        """Build an EpaperItem from a single article page."""
        item = EpaperItem()
        url = response.url
        item['url'] = url
        item['papername'] = '新华每日电讯'
        item['dirname'] = 'data/mrdx'
        # The 8-digit date (YYYYMMDD) and the article id are both embedded
        # in the article URL.
        item['date'] = ''.join(re.findall(r'\d{8}', url))
        item['article_id'] = ''.join(re.findall(r'Articel\d{1,10}[A-Z]{2}', url))
        item['title'] = response.xpath('//*[@id="contenttext"]/table/tbody/tr[2]/td/div/strong/font/text()').get()
        item['profix'] = ''
        item['subtitle'] = ''
        item['author'] = ''
        item['layout'] = response.xpath('//*[@id="contenttext"]/table/tbody/tr[5]/td/text()').get()
        content = ''.join(response.xpath('//*[@id="contenttext"]/div[2]').getall())
        item['content'] = content
        item['images'] = self.parse_image(url, content)
        return item

    def parse_image(self, url, html_text):
        """Return ``{absolute_image_url: ''}`` for the first <img> in *html_text*.

        Returns an empty dict when the article contains no image.
        """
        images = {}
        # Capture the src value with a regex group.  The previous
        # implementation joined ALL matches and then lstrip()'ed the literal
        # '<img src="' — but lstrip strips a character SET, so URLs such as
        # 'images/x.jpg' lost their leading characters ('i', 'm', 'g', 's',
        # 'r', 'c' are all in the set).
        match = re.search(r'<img src="?(.*?)"', html_text)
        if match is None or not match.group(1):
            return images
        images[urljoin(url, match.group(1))] = ''
        return images