# -*- coding: utf-8 -*-
import  datetime, re
from urllib.parse import urljoin
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import EpaperItem


class JfjbSpider(scrapy.Spider):
    """Spider for the PLA Daily (解放军报) e-paper on www.81.cn.

    Crawls one issue per day in [start_date, end_date] (inclusive),
    following layout-navigation pages -> per-layout article lists ->
    individual articles, and yields one EpaperItem per article.
    """

    name = 'jfjb'
    allowed_domains = ['81.cn']
    # A daily issue's front layout page lives at
    # <url_profix><YYYY-MM/DD>/<node_html>.
    url_profix = 'http://www.81.cn/jfjbmap/content/'
    node_html = 'node_2.htm'

    # Patterns hoisted to class level so they are compiled once, not per page.
    # BUG FIX: the original anchor pattern ended in `\.*>` (zero or more
    # literal dots), which silently skipped <a> tags carrying any extra
    # attribute; `[^>]*>` accepts them.
    _ANCHOR_RE = re.compile(r'<a\s+href="([^<>"\']*)"[^>]*>')
    _DATE_RE = re.compile(r'\d{4}-\d{2}/\d{2}')
    _ARTICLE_ID_RE = re.compile(r'_(\d{1,10})\.')
    _IMG_SRC_RE = re.compile(r'src="?(.*?)">')
    _IMG_CAPTION_RE = re.compile(r'<td(?:[^<]+)?>([^<]+)</td>')

    def __init__(self, start_date=None, end_date=None, *args, **kwargs):
        """Accept ISO dates ("YYYY-MM-DD"); both default to today.

        Raises ValueError if a supplied date string is malformed.
        """
        super().__init__(*args, **kwargs)
        if start_date is None:
            self.start_date = datetime.date.today()
        else:
            self.start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d").date()
        if end_date is None:
            self.end_date = datetime.date.today()
        else:
            self.end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d").date()

    def start_requests(self):
        """Yield one request per day for that issue's front layout page."""
        day = self.start_date
        one_day = datetime.timedelta(days=1)
        while day <= self.end_date:
            url = self.url_profix + day.strftime("%Y-%m/%d") + '/' + self.node_html
            day += one_day
            self.logger.debug('待抓取版面列表' + url)
            yield scrapy.Request(url, self.parse_item)

    # Parse the layout (section) navigation of one issue.
    def parse_item(self, response):
        """Request every layout page linked from the section navigation."""
        nav_html = ''.join(response.xpath('//*[@id="APP-SectionNav"]').getall())
        for href in self._ANCHOR_RE.findall(nav_html):
            # BUG FIX: the original used str.rstrip/lstrip, which strip
            # character *sets* rather than suffixes/prefixes and can eat
            # legitimate URL characters; urljoin resolves relative hrefs
            # such as './node_3.htm' correctly.
            url = urljoin(response.url, href)
            self.logger.debug('待抓取新闻列表页' + url)
            yield scrapy.Request(url, callback=self.parse_layout)

    # Parse the article list of one layout page.
    def parse_layout(self, response):
        """Request every article linked from this layout's news list."""
        for href in response.xpath('//*[@id="APP-NewsList"]/li/a/@href').getall():
            # Same fix as parse_item: resolve relative links with urljoin.
            url = urljoin(response.url, href)
            self.logger.debug('待抓取文章列表页' + url)
            yield scrapy.Request(url, callback=self.parse_article)

    # Parse one article page.
    def parse_article(self, response):
        """Extract one article page into an EpaperItem and return it."""
        item = EpaperItem()
        url = response.url
        item['url'] = url
        item['papername'] = '解放军报'
        item['dirname'] = 'data/jfjb'
        # Issue date (as YYYYMMDD) and article id are both encoded in the
        # URL, e.g. .../2021-06/01/content_123456.htm
        date_match = self._DATE_RE.search(url)
        item['date'] = date_match.group(0).replace('-', '').replace('/', '') if date_match else ''
        id_match = self._ARTICLE_ID_RE.search(url)
        item['article_id'] = id_match.group(1) if id_match else ''
        item['title'] = response.xpath('//*[@id="APP-Title"]/text()').get()
        # Optional fields default to '' instead of None.
        item['profix'] = response.xpath('//*[@id="APP-PreTitle"]/text()').get() or ''
        item['subtitle'] = response.xpath('//*[@id="APP-Subtitle"]/text()').get() or ''
        item['author'] = response.xpath('//*[@id="APP-Author"]/text()').get() or ''
        item['layout'] = response.xpath('/html/body/div[2]/div[1]/div[2]/span/text()').get()
        item['content'] = response.xpath('//*[@id="APP-Content"]').get()
        item['images'] = self.parse_image(url, response.xpath('/html/body/div[2]/div[2]/div[3]/div[2]/div').getall())
        return item

    def parse_image(self, url, html_text):
        """Map absolute image URL -> caption for each image fragment.

        `url` is the article page URL (base for resolving relative srcs);
        `html_text` is a list of HTML fragments, one per image block.

        BUG FIX: the original joined re.findall() results from a two-group
        caption pattern — a list of tuples — which raised TypeError whenever
        a caption <td> matched; it also checked that findall() result for
        None (dead code: findall returns a list) and char-set-stripped the
        src value, which could eat leading filename characters.
        """
        images = {}
        for fragment in html_text:
            self.logger.debug(fragment)
            src_match = self._IMG_SRC_RE.search(fragment)
            if src_match is None:
                continue  # fragment contains no usable src attribute
            image_url = urljoin(url, src_match.group(1))
            caption_match = self._IMG_CAPTION_RE.search(fragment)
            images[image_url] = caption_match.group(1) if caption_match else ''
        return images