# coding=utf-8
import logging
import sys
import datetime
from scrapy.linkextractors.sgml import SgmlLinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from scrapy_sinahealth import utils
from scrapy_sinahealth.items import ScrapyHealthItem

# Python 2 hack: re-expose sys.setdefaultencoding (removed from the module
# namespace by site.py at startup) so implicit str<->unicode conversions use
# UTF-8 instead of ASCII. Needed because this spider mixes byte strings and
# Chinese unicode text.
# NOTE(review): Python-2-only and fragile (affects the whole interpreter);
# should be dropped when this project is ported to Python 3.
reload(sys);
sys.setdefaultencoding("utf-8");

class TwentyFirstJingji(CrawlSpider):
    """Crawl the 21st Century Business Herald e-paper (epaper.21jingji.com)
    for articles published between ``start_date`` and ``end_date`` (inclusive).

    Front-page ("node_1") URLs for each day in the range are generated in
    ``__init__``; the crawl rule then follows article links on those pages
    and hands each article to :meth:`parse_content`.
    """

    name = "21jingji"
    download_delay = 1  # seconds between requests, to be polite to the site
    allowed_domains = ["epaper.21jingji.com"]
    # Inclusive date range of e-paper issues to crawl, "YYYY-MM-DD".
    start_date = "2018-02-01"
    end_date = "2018-02-12"

    rules = (
        # Article pages look like .../html/2018-02/12/content_12345.htm.
        # Dots are escaped so the pattern matches the literal host and the
        # literal ".htm" suffix rather than any character.
        Rule(
            SgmlLinkExtractor(
                allow=(r'http://epaper\.21jingji\.com/html/\d\d\d\d-\d\d/\d\d/content_\d+\.htm',),
            ),
            callback='parse_content',
        ),
    )

    def __init__(self, *args, **kwargs):
        # Forward *args/**kwargs so Scrapy spider arguments
        # (e.g. ``scrapy crawl 21jingji -a foo=bar``) survive the normal
        # from_crawler -> __init__ instantiation path; the original
        # zero-argument signature crashed on any spider argument.
        super(TwentyFirstJingji, self).__init__(*args, **kwargs)
        self.start_urls = self.generate_url(self.start_date, self.end_date)

    def get_between_days(self, begin_date, end_date):
        """Return every day in [begin_date, end_date] formatted as
        ``YYYY-MM/DD`` — the path fragment used by the e-paper URLs.

        :param begin_date: start of the range, a ``YYYY-MM-DD`` string.
        :param end_date: end of the range, a ``YYYY-MM-DD`` string.
        :returns: list of ``YYYY-MM/DD`` strings; empty if begin > end.
        :raises ValueError: if either date string is malformed.
        """
        date_list = []
        begin_date = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
        end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
        while begin_date <= end_date:
            date_list.append(begin_date.strftime("%Y-%m/%d"))
            begin_date += datetime.timedelta(days=1)
        return date_list

    def generate_url(self, start_date, end_date):
        """Build the daily front-page URL for every day in the range."""
        base_url = "http://epaper.21jingji.com/html/%s/node_1.htm"
        return [base_url % d for d in self.get_between_days(start_date, end_date)]

    def parse_content(self, response):
        """Extract one article from an e-paper content page.

        :param response: the article page response.
        :yields: a populated :class:`ScrapyHealthItem`.
        """
        logging.info("processing content url %s..." % response.url)
        item = ScrapyHealthItem()

        item["url"] = response.url.strip()
        # The Referer header is absent when the page is fetched directly
        # (seed URL or retry); fall back to '' instead of raising KeyError.
        item["referer"] = (response.request.headers.get('Referer') or '').strip()
        item["spider"] = self.name
        item["category"] = "Economic"
        item["tag"] = ""

        item['title'] = utils.select_default(
            response, '//h1[@class="news_title"]/text()')
        item['content'] = utils.select_default(
            response, '//div[@class="news_content"]')
        item['issue_date'] = utils.select_default(
            response, '//div[@class="listbox_title"]/div[@class="data"]/text()')

        # Unicode literal instead of str.decode('utf-8'): identical value
        # under Python 2 and also valid should the project move to Python 3.
        item['source'] = u'21世纪经济报'

        yield item