# -*- coding: utf-8 -*-
import  datetime, re
from urllib.parse import urljoin
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import EpaperItem


class WorkercnSpider(scrapy.Spider):
    """Spider for the Workers' Daily (工人日报) e-paper on workercn.cn.

    Crawls one issue per day between ``start_date`` and ``end_date``
    (inclusive, ``YYYY-MM-DD`` strings, both defaulting to today), walking
    issue front page -> page layouts -> articles, and yields one
    ``EpaperItem`` per article.
    """

    name = 'workercn'
    allowed_domains = ['workercn.cn']
    # NOTE: "profix" (sic) — name kept unchanged for backward compatibility.
    # Base URL to which the per-day path and page file name are appended.
    url_profix = 'http://media.workercn.cn/sites/media/grrb/'
    node_html = 'GR0100.htm'
    paper_name = '工人日报'
    dir_name = 'data/workercn'

    # <img src=...> extractor; group 1 is the bare URL (the character class
    # already excludes the surrounding quotes).
    _img_src_re = re.compile(r'\bsrc\b\s*=\s*[\'\"]?([^\'\"]*)[\'\"]?')
    # Date component embedded in every article URL, e.g. '2023_01/05'.
    _date_re = re.compile(r'\d{4}_\d{2}/\d{2}')
    # Article id embedded in the page file name, e.g. 'GR0101.'.
    _article_id_re = re.compile(r'GR\d{1,10}\.')

    def __init__(self, start_date=None, end_date=None, *args, **kwargs):
        """Parse the crawl date bounds; missing bounds default to today."""
        super(WorkercnSpider, self).__init__(*args, **kwargs)
        if start_date is None:
            self.start_date = datetime.date.today()
        else:
            self.start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d").date()
        if end_date is None:
            self.end_date = datetime.date.today()
        else:
            self.end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d").date()

    def start_requests(self):
        """Yield one request per day for that issue's front-page index."""
        d = self.start_date
        one_day = datetime.timedelta(days=1)
        while d <= self.end_date:
            day = d.strftime("%Y_%m/%d")
            d += one_day
            url = self.url_profix + day + '/' + self.node_html
            yield scrapy.Request(url, self.parse_item)

    # Crawl the list of page layouts of one issue.
    def parse_item(self, response):
        """Follow every page-layout link found on the issue front page."""
        hrefs = response.xpath('//div[@id="page_content"]/table/tr/td/a/@href').getall()
        for htmlname in hrefs:
            # urljoin resolves './GRxxxx.htm' against the page URL correctly;
            # the previous rstrip()/lstrip() calls treated their argument as a
            # character *set* and could silently eat extra characters.
            yield scrapy.Request(urljoin(response.url, htmlname),
                                 callback=self.parse_layout)

    # Crawl the article list of one page layout.
    def parse_layout(self, response):
        """Follow every article link listed on a page-layout page."""
        hrefs = response.xpath(
            '//*[@id="secid_CA15abf508576346f9aa0def773901d41d"]/ul/li/a/@href').getall()
        for htmlname in hrefs:
            # Same fix as parse_item: resolve relative links with urljoin
            # instead of charset-based rstrip of the last path segment.
            url = urljoin(response.url, htmlname)
            self.logger.debug('待抓取文章列表页' + url)
            yield scrapy.Request(url, callback=self.parse_article)

    # Crawl one article page and build the item.
    def parse_article(self, response):
        """Extract one ``EpaperItem`` from an article page."""
        item = EpaperItem()
        url = response.url
        item['url'] = url
        item['papername'] = self.paper_name
        item['dirname'] = self.dir_name
        # '2023_01/05' -> '20230105'
        item['date'] = ''.join(self._date_re.findall(url)).replace('_', '').replace('/', '')
        # 'GR0101.' -> 'GR0101'
        item['article_id'] = ''.join(self._article_id_re.findall(url)).rstrip('.')
        item['title'] = response.xpath('//*[@id="secid_CAa3f607622dc249df898acbb3debeec45"]/div/div/h1/text()').get()
        item['profix'] = response.xpath('//*[@id="secid_CAa3f607622dc249df898acbb3debeec45"]/div/div/h3/text()').get()
        item['subtitle'] = response.xpath('//*[@id="secid_CAa3f607622dc249df898acbb3debeec45"]/div/div/h2/text()').get()
        item['author'] = ''
        item['layout'] = response.xpath('//div[@class="lai"]/span/text()').get()
        content = ''.join(response.xpath('//div[@id="ozoom"]').getall())
        item['content'] = content
        item['images'] = self.parse_image(url, content)
        return item

    def parse_image(self, url, content):
        """Return ``{absolute image URL: ''}`` for every ``src=`` in *content*.

        Bug fix: ``findall`` with one capture group already yields the bare
        URL, so the old ``u.lstrip('src="')`` call — which strips a character
        *set* — corrupted relative paths beginning with 's', 'r', 'c', '='
        or '"' (e.g. 'res/1.jpg' became 'es/1.jpg'). It has been removed.
        """
        images = {}
        for src in self._img_src_re.findall(content):
            images[urljoin(url, src)] = ''
        return images